//===- JumpThreading.cpp - Thread control through conditional blocks -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jump-threading"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds,   "Number of terminators folded");
STATISTIC(NumDupes,   "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
Threshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

namespace {
  /// This pass performs 'jump threading', which looks at blocks that have
  /// multiple predecessors and multiple successors.  If one or more of the
  /// predecessors of the block can be proven to always jump to one of the
  /// successors, we forward the edge from the predecessor to the successor by
  /// duplicating the contents of this block.
  ///
  /// An example of when this can occur is code like this:
  ///
  ///   if () { ...
  ///     X = 4;
  ///   }
  ///   if (X < 3) {
  ///
  /// In this case, the unconditional branch at the end of the first if can be
  /// revectored to the false side of the second if.
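  ///
  /// Threading forwards that edge by cloning the contents of the block that
  /// tests X into a new block that branches unconditionally to the known
  /// successor, so that path never re-evaluates the condition.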
  ///
  class JumpThreading : public FunctionPass {
    TargetData *TD;
#ifdef NDEBUG
    SmallPtrSet<BasicBlock*, 16> LoopHeaders;
#else
    SmallSet<AssertingVH<BasicBlock>, 16> LoopHeaders;
#endif
  public:
    static char ID; // Pass identification
    JumpThreading() : FunctionPass(&ID) {}

    bool runOnFunction(Function &F);
    void FindLoopHeaders(Function &F);

    bool ProcessBlock(BasicBlock *BB);
    bool ThreadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock*> &PredBBs,
                    BasicBlock *SuccBB);
    bool DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
                                          BasicBlock *PredBB);

    typedef SmallVectorImpl<std::pair<ConstantInt*,
                                      BasicBlock*> > PredValueInfo;

    bool ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
                                         PredValueInfo &Result);
    bool ProcessThreadableEdges(Instruction *CondInst, BasicBlock *BB);


    bool ProcessBranchOnDuplicateCond(BasicBlock *PredBB, BasicBlock *DestBB);
    bool ProcessSwitchOnDuplicateCond(BasicBlock *PredBB, BasicBlock *DestBB);

    bool ProcessJumpOnPHI(PHINode *PN);

    bool SimplifyPartiallyRedundantLoad(LoadInst *LI);
  };
}

char JumpThreading::ID = 0;
static RegisterPass<JumpThreading>
X("jump-threading", "Jump Threading");

// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }

/// runOnFunction - Top level algorithm.
///
bool JumpThreading::runOnFunction(Function &F) {
  DEBUG(errs() << "Jump threading on function '" << F.getName() << "'\n");
  TD = getAnalysisIfAvailable<TargetData>();

  FindLoopHeaders(F);

  bool AnotherIteration = true, EverChanged = false;
  while (AnotherIteration) {
    AnotherIteration = false;
    bool Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
      BasicBlock *BB = I;
      // Thread all of the branches we can over this block.
      while (ProcessBlock(BB))
        Changed = true;

      ++I;

      // If the block is trivially dead, zap it.  This eliminates the successor
      // edges which simplifies the CFG.
      if (pred_begin(BB) == pred_end(BB) &&
          BB != &BB->getParent()->getEntryBlock()) {
        DEBUG(errs() << "  JT: Deleting dead block '" << BB->getName()
              << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        DeleteDeadBlock(BB);
        Changed = true;
      } else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
        // Can't thread an unconditional jump, but if the block is "almost
        // empty", we can replace uses of it with uses of the successor and make
        // this dead.
        if (BI->isUnconditional() &&
            BB != &BB->getParent()->getEntryBlock()) {
          BasicBlock::iterator BBI = BB->getFirstNonPHI();
          // Ignore dbg intrinsics.
          while (isa<DbgInfoIntrinsic>(BBI))
            ++BBI;
          // If the terminator is the only non-phi instruction, try to nuke it.
          if (BBI->isTerminator()) {
            // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
            // block, we have to make sure it isn't in the LoopHeaders set.  We
            // reinsert afterward in the rare case when the block isn't deleted.
            bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);

            if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
              Changed = true;
            else if (ErasedFromLoopHeaders)
              LoopHeaders.insert(BB);
          }
        }
      }
    }
    AnotherIteration = Changed;
    EverChanged |= Changed;
  }

  LoopHeaders.clear();
  return EverChanged;
}

/// getJumpThreadDuplicationCost - Return the cost of duplicating this block to
/// thread across it.
static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB) {
  // Ignore PHI nodes, these will be flattened when duplication happens.
  BasicBlock::const_iterator I = BB->getFirstNonPHI();

  // Sum up the cost of each instruction until we get to the terminator.  Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; !isa<TerminatorInst>(I); ++I) {
    // Debugger intrinsics don't incur code size.
    if (isa<DbgInfoIntrinsic>(I)) continue;

    // If this is a pointer->pointer bitcast, it is free.
    if (isa<BitCastInst>(I) && isa<PointerType>(I->getType()))
      continue;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive.  If they are non-intrinsic calls, we model them
    // as having cost of 4.  If they are a non-vector intrinsic, we model them
    // as having cost of 2 total, and if they are a vector intrinsic, we model
    // them as having cost 1.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!isa<VectorType>(CI->getType()))
        Size += 1;
    }
  }

  // Threading through a switch statement is particularly profitable.  If this
  // block ends in a switch, decrease its cost to make it more likely to happen.
  if (isa<SwitchInst>(I))
    Size = Size > 6 ? Size-6 : 0;

  return Size;
}

/// FindLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops.  Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations.  To prevent this from
/// happening, we first have to find the loop headers.  Here we approximate this
/// by finding targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header.  For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the loop
/// to exit blocks, and it is often profitable to thread backedges to other
/// blocks within the loop (forming a nested loop).  This simple analysis is not
/// rich enough to track all of these properties and keep it up-to-date as the
/// CFG mutates, so we don't allow any of these transformations.
///
void JumpThreading::FindLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (unsigned i = 0, e = Edges.size(); i != e; ++i)
    LoopHeaders.insert(const_cast<BasicBlock*>(Edges[i].second));
}

/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt in any of our
/// predecessors.  If so, return the known list of value and pred BB in the
/// result vector.  If a value is known to be undef, it is returned as null.
///
/// The BB basic block is known to start with a PHI node.
///
/// This returns true if there were any known values.
///
/// TODO: Per PR2563, we could infer value range information about a predecessor
/// based on its terminator.
bool JumpThreading::
ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,
                                PredValueInfo &Result) {
  PHINode *TheFirstPHI = cast<PHINode>(BB->begin());

  // If V is a ConstantInt, then it is known in all predecessors.
  if (isa<ConstantInt>(V) || isa<UndefValue>(V)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(V);
    Result.resize(TheFirstPHI->getNumIncomingValues());
    for (unsigned i = 0, e = Result.size(); i != e; ++i)
      Result[i] = std::make_pair(CI, TheFirstPHI->getIncomingBlock(i));
    return true;
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0 || I->getParent() != BB)
    return false;

  // If I is a PHI node, then we know the incoming values for any constants.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (isa<ConstantInt>(InVal) || isa<UndefValue>(InVal)) {
        ConstantInt *CI = dyn_cast<ConstantInt>(InVal);
        Result.push_back(std::make_pair(CI, PN->getIncomingBlock(i)));
      }
    }
    return !Result.empty();
  }

  SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> LHSVals, RHSVals;

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    // X | true -> true
    // X & false -> false
    if (I->getOpcode() == Instruction::Or ||
        I->getOpcode() == Instruction::And) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals);
      ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (I->getOpcode() == Instruction::Or)
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      // Scan for the sentinel.
      for (unsigned i = 0, e = LHSVals.size(); i != e; ++i)
        if (LHSVals[i].first == InterestingVal || LHSVals[i].first == 0)
          Result.push_back(LHSVals[i]);
      for (unsigned i = 0, e = RHSVals.size(); i != e; ++i)
        if (RHSVals[i].first == InterestingVal || RHSVals[i].first == 0)
          Result.push_back(RHSVals[i]);
      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result);
      if (Result.empty())
        return false;

      // Invert the known values.  An undef input is represented by a null
      // entry; not-of-undef is still undef, so leave those entries alone.
      for (unsigned i = 0, e = Result.size(); i != e; ++i)
        if (Result[i].first)
          Result[i].first =
            cast<ConstantInt>(ConstantExpr::getNot(Result[i].first));
      return true;
    }
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0));
    if (PN && PN->getParent() == BB) {
      // We can do this simplification if any comparisons fold to true or false.
      // See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS = PN->getIncomingValue(i);
        Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);

        Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS);
        if (Res == 0) continue;

        if (isa<UndefValue>(Res))
          Result.push_back(std::make_pair((ConstantInt*)0, PredBB));
        else if (ConstantInt *CI = dyn_cast<ConstantInt>(Res))
          Result.push_back(std::make_pair(CI, PredBB));
      }

      return !Result.empty();
    }

    // TODO: We could also recurse to see if we can determine constants another
    // way.
  }
  return false;
}


/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors.  This should reduce the in-degree of the others.
///
static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
  TerminatorInst *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreading::ProcessBlock(BasicBlock *BB) {
  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks.  This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
  if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
    if (SinglePred->getTerminator()->getNumSuccessors() == 1 &&
        SinglePred != BB) {
      // If SinglePred was a loop header, BB becomes one.
      if (LoopHeaders.erase(SinglePred))
        LoopHeaders.insert(BB);

      // Remember if SinglePred was the entry block of the function.  If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());
      return true;
    }
  }

  // Look to see if the terminator is a branch or switch; if not, we can't
  // thread it.
  Value *Condition;
  if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
    Condition = SI->getCondition();
  else
    return false; // Must be an invoke.

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch.  This can occur due to threading in
  // other blocks.
  if (isa<ConstantInt>(Condition)) {
    DEBUG(errs() << "  In block '" << BB->getName()
          << "' folding terminator: " << *BB->getTerminator() << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB);
    return true;
  }

  // If the terminator is branching on an undef, we can pick any of the
  // successors to branch to.  Let GetBestDestForJumpOnUndef decide.
  if (isa<UndefValue>(Condition)) {
    unsigned BestSucc = GetBestDestForJumpOnUndef(BB);

    // Fold the branch/switch.
    TerminatorInst *BBTerm = BB->getTerminator();
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      RemovePredecessorAndSimplify(BBTerm->getSuccessor(i), BB, TD);
    }

    DEBUG(errs() << "  In block '" << BB->getName()
          << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    BBTerm->eraseFromParent();
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // If the condition is an instruction defined in another block, see if a
  // predecessor has the same condition:
  //   br COND, BBX, BBY
  // BBX:
  //   br COND, BBZ, BBW
  if (!Condition->hasOneUse() && // Multiple uses.
      (CondInst == 0 || CondInst->getParent() != BB)) { // Non-local definition.
    pred_iterator PI = pred_begin(BB), E = pred_end(BB);
    if (isa<BranchInst>(BB->getTerminator())) {
      for (; PI != E; ++PI)
        if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
          if (PBI->isConditional() && PBI->getCondition() == Condition &&
              ProcessBranchOnDuplicateCond(*PI, BB))
            return true;
    } else {
      assert(isa<SwitchInst>(BB->getTerminator()) && "Unknown jump terminator");
      for (; PI != E; ++PI)
        if (SwitchInst *PSI = dyn_cast<SwitchInst>((*PI)->getTerminator()))
          if (PSI->getCondition() == Condition &&
              ProcessSwitchOnDuplicateCond(*PI, BB))
            return true;
    }
  }

  // All the rest of our checks depend on the condition being an instruction.
  if (CondInst == 0)
    return false;

  // See if this is a phi node in the current block.
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB)
      return ProcessJumpOnPHI(PN);

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
    if (!isa<PHINode>(CondCmp->getOperand(0)) ||
        cast<PHINode>(CondCmp->getOperand(0))->getParent() != BB) {
      // If we have a comparison, loop over the predecessors to see if there is
      // a condition with a lexically identical value.
      pred_iterator PI = pred_begin(BB), E = pred_end(BB);
      for (; PI != E; ++PI)
        if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
          if (PBI->isConditional() && *PI != BB) {
            if (CmpInst *CI = dyn_cast<CmpInst>(PBI->getCondition())) {
              if (CI->getOperand(0) == CondCmp->getOperand(0) &&
                  CI->getOperand(1) == CondCmp->getOperand(1) &&
                  CI->getPredicate() == CondCmp->getPredicate()) {
                // TODO: Could handle things like (x != 4) --> (x == 17)
                if (ProcessBranchOnDuplicateCond(*PI, BB))
                  return true;
              }
            }
          }
    }
  }

  // Check for some cases that are worth simplifying.  Right now we want to look
  // for loads that are used by a switch or by the condition for the branch.  If
  // we see one, check to see if it's partially redundant.  If so, insert a PHI
  // which can then be used to thread the values.
  //
  // This is particularly important because reg2mem inserts loads and stores all
  // over the place, and this blocks jump threading if we don't zap them.
  Value *SimplifyValue = CondInst;
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
    if (SimplifyPartiallyRedundantLoad(LI))
      return true;


  // Handle a variety of cases where we are branching on something derived from
  // a PHI node in the current block.  If we can prove that any predecessors
  // compute a predictable value based on a PHI node, thread those predecessors.
  //
  // We only bother doing this if the current block has a PHI node and if the
  // conditional instruction lives in the current block.  If either condition
  // fails, this won't be a computable value anyway.
  if (CondInst->getParent() == BB && isa<PHINode>(BB->front()))
    if (ProcessThreadableEdges(CondInst, BB))
      return true;


  // TODO: If we have: "br (X > 0)" and we have a predecessor where we know
  // "(X == 4)", thread through this block.

  return false;
}

/// ProcessBranchOnDuplicateCond - We found a block and a predecessor of that
/// block that jump on exactly the same condition.  This means that we almost
/// always know the direction of the edge in the DESTBB:
///  PREDBB:
///     br COND, DESTBB, BBY
///  DESTBB:
///     br COND, BBZ, BBW
///
/// If DESTBB has multiple predecessors, we can't just constant fold the branch
/// in DESTBB, we have to thread over it.
bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
                                                 BasicBlock *BB) {
  BranchInst *PredBI = cast<BranchInst>(PredBB->getTerminator());

  // If both successors of PredBB go to DESTBB, we don't know anything.  We can
  // fold the branch to an unconditional one, which allows other recursive
  // simplifications.
  bool BranchDir;
  if (PredBI->getSuccessor(1) != BB)
    BranchDir = true;
  else if (PredBI->getSuccessor(0) != BB)
    BranchDir = false;
  else {
    DEBUG(errs() << "  In block '" << PredBB->getName()
          << "' folding terminator: " << *PredBB->getTerminator() << '\n');
    ++NumFolds;
    ConstantFoldTerminator(PredBB);
    return true;
  }

  BranchInst *DestBI = cast<BranchInst>(BB->getTerminator());

  // If the dest block has one predecessor, just fix the branch condition to a
  // constant and fold it.
  if (BB->getSinglePredecessor()) {
    DEBUG(errs() << "  In block '" << BB->getName()
          << "' folding condition to '" << BranchDir << "': "
          << *BB->getTerminator() << '\n');
    ++NumFolds;
    Value *OldCond = DestBI->getCondition();
    DestBI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()),
                                          BranchDir));
    ConstantFoldTerminator(BB);
    RecursivelyDeleteTriviallyDeadInstructions(OldCond);
    return true;
  }


  // Next, figure out which successor we are threading to.
  BasicBlock *SuccBB = DestBI->getSuccessor(!BranchDir);

  SmallVector<BasicBlock*, 2> Preds;
  Preds.push_back(PredBB);

  // Ok, try to thread it!
  return ThreadEdge(BB, Preds, SuccBB);
}

/// ProcessSwitchOnDuplicateCond - We found a block and a predecessor of that
/// block that switch on exactly the same condition.  This means that we almost
/// always know the direction of the edge in the DESTBB:
///  PREDBB:
///     switch COND [... DESTBB, BBY ... ]
///  DESTBB:
///     switch COND [... BBZ, BBW ]
///
/// Optimizing switches like this is very important, because simplifycfg builds
/// switches out of repeated 'if' conditions.
bool JumpThreading::ProcessSwitchOnDuplicateCond(BasicBlock *PredBB,
                                                 BasicBlock *DestBB) {
  // Can't thread edge to self.
  if (PredBB == DestBB)
    return false;

  SwitchInst *PredSI = cast<SwitchInst>(PredBB->getTerminator());
  SwitchInst *DestSI = cast<SwitchInst>(DestBB->getTerminator());

  // There are a variety of optimizations that we can potentially do on these
  // blocks: we order them from most to least preferable.

  // If DESTBB *just* contains the switch, then we can forward edges from PREDBB
  // directly to their destination.  This does not introduce *any* code size
  // growth.  Skip debug info first.
  BasicBlock::iterator BBI = DestBB->begin();
  while (isa<DbgInfoIntrinsic>(BBI))
    ++BBI;

  // FIXME: Thread if it just contains a PHI.
  if (isa<SwitchInst>(BBI)) {
    bool MadeChange = false;
    // Ignore the default edge for now.
    for (unsigned i = 1, e = DestSI->getNumSuccessors(); i != e; ++i) {
      ConstantInt *DestVal = DestSI->getCaseValue(i);
      BasicBlock *DestSucc = DestSI->getSuccessor(i);

      // Okay, DestSI has a case for 'DestVal' that goes to 'DestSucc'.  See if
      // PredSI has an explicit case for it.  If so, forward.  If it is covered
      // by the default case, we can't update PredSI.
      unsigned PredCase = PredSI->findCaseValue(DestVal);
      if (PredCase == 0) continue;

      // If PredSI doesn't go to DestBB on this value, then it won't reach the
      // case on this condition.
      if (PredSI->getSuccessor(PredCase) != DestBB &&
          DestSI->getSuccessor(i) != DestBB)
        continue;

      // Otherwise, we're safe to make the change.  Make sure that the edge from
      // DestSI to DestSucc is not critical and has no PHI nodes.
      DEBUG(errs() << "FORWARDING EDGE " << *DestVal << "   FROM: " << *PredSI);
      DEBUG(errs() << "THROUGH: " << *DestSI);

      // If the destination has PHI nodes, just split the edge for updating
      // simplicity.
      if (isa<PHINode>(DestSucc->begin()) && !DestSucc->getSinglePredecessor()){
        SplitCriticalEdge(DestSI, i, this);
        DestSucc = DestSI->getSuccessor(i);
      }
      FoldSingleEntryPHINodes(DestSucc);
      PredSI->setSuccessor(PredCase, DestSucc);
      MadeChange = true;
    }

    if (MadeChange)
      return true;
  }

  return false;
}


/// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant
/// load instruction, eliminate it by replacing it with a PHI node.  This is an
/// important optimization that encourages jump threading, and needs to be run
/// interlaced with other jump threading tasks.
bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
  // Don't hack volatile loads.
  if (LI->isVolatile()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't be
  // partially redundant.
  BasicBlock *LoadBB = LI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  Value *LoadedPtr = LI->getOperand(0);

  // If the loaded operand is defined in the LoadBB, it can't be available.
  // FIXME: Could do PHI translation, that would be fun :)
  if (Instruction *PtrOp = dyn_cast<Instruction>(LoadedPtr))
    if (PtrOp->getParent() == LoadBB)
      return false;

  // Scan a few instructions up from the load, to see if it is obviously live at
  // the entry to its block.
  BasicBlock::iterator BBIt = LI;

  if (Value *AvailableVal = FindAvailableLoadedValue(LoadedPtr, LoadBB,
                                                     BBIt, 6)) {
    // If the value of the load is locally available within the block, just use
    // it.  This frequently occurs for reg2mem'd allocas.
    //cerr << "LOAD ELIMINATED:\n" << *BBIt << *LI << "\n";

    // If the returned value is the load itself, replace with an undef. This can
    // only happen in dead loops.
    if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
    LI->replaceAllUsesWith(AvailableVal);
    LI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load.  If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;


  SmallPtrSet<BasicBlock*, 8> PredsScanned;
  typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = 0;

  // If we got here, the loaded value is transparent through to the start of the
  // block.  Check to see if it is available in any of the predecessor blocks.
  for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
       PI != PE; ++PI) {
    BasicBlock *PredBB = *PI;

    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB))
      continue;

    // Scan the predecessor to see if the value is available in the pred.
    BBIt = PredBB->end();
    Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6);
    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    // If so, this load is partially redundant.  Remember this info so that we
    // can create a PHI node.
    AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
  }

  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors.  If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common predecessors.
  // This ensures that we only have to insert one reload, thus not increasing
  // code size.
  BasicBlock *UnavailablePred = 0;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it.  If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a critical
    // edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (unsigned i = 0, e = AvailablePreds.size(); i != e; ++i)
      AvailablePredSet.insert(AvailablePreds[i].first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (pred_iterator PI = pred_begin(LoadBB), PE = pred_end(LoadBB);
         PI != PE; ++PI)
      if (!AvailablePredSet.count(*PI))
        PredsToSplit.push_back(*PI);

    // Split them out to their own block.
    UnavailablePred =
      SplitBlockPredecessors(LoadBB, &PredsToSplit[0], PredsToSplit.size(),
                             "thread-split", this);
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available.  Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    Value *NewVal = new LoadInst(LoadedPtr, LI->getName()+".pr",
                                 UnavailablePred->getTerminator());
    AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds; sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  PHINode *PN = PHINode::Create(LI->getType(), "", LoadBB->begin());
  PN->takeName(LI);

  // Insert new entries into the PHI for each predecessor.  A single block may
  // have multiple entries here.
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB); PI != E;
       ++PI) {
    AvailablePredsTy::iterator I =
      std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
                       std::make_pair(*PI, (Value*)0));

    assert(I != AvailablePreds.end() && I->first == *PI &&
           "Didn't find entry for predecessor!");

    PN->addIncoming(I->second, I->first);
  }

  //cerr << "PRE: " << *LI << *PN << "\n";

  LI->replaceAllUsesWith(PN);
  LI->eraseFromParent();

  return true;
}

/// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations.  Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock*,
                                  BasicBlock*> > &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity.  If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations.  We prefer to thread
  // blocks with known and real destinations to threading undef.  We'll handle
  // them later if interesting.
  DenseMap<BasicBlock*, unsigned> DestPopularity;
  for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i)
    if (PredToDestList[i].second)
      DestPopularity[PredToDestList[i].second]++;

  // Find the most popular dest.
  DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
  BasicBlock *MostPopularDest = DPI->first;
  unsigned Popularity = DPI->second;
  SmallVector<BasicBlock*, 4> SamePopularity;

  for (++DPI; DPI != DestPopularity.end(); ++DPI) {
    // If the popularity of this entry isn't higher than the popularity we've
    // seen so far, ignore it.
    if (DPI->second < Popularity)
      ; // ignore.
    else if (DPI->second == Popularity) {
      // If it is the same as what we've seen so far, keep track of it.
      SamePopularity.push_back(DPI->first);
    } else {
      // If it is more popular, remember it.
      SamePopularity.clear();
      MostPopularDest = DPI->first;
      Popularity = DPI->second;
    }
  }

  // Okay, now we know the most popular destination.  If there is more than one
  // destination, we need to pick one.  This is arbitrary, but we need to make a
  // deterministic decision.  Pick the first one that appears in the successor
  // list.
  if (!SamePopularity.empty()) {
    SamePopularity.push_back(MostPopularDest);
    TerminatorInst *TI = BB->getTerminator();
    for (unsigned i = 0; ; ++i) {
      assert(i != TI->getNumSuccessors() && "Didn't find any successor!");

      if (std::find(SamePopularity.begin(), SamePopularity.end(),
                    TI->getSuccessor(i)) == SamePopularity.end())
        continue;

      MostPopularDest = TI->getSuccessor(i);
      break;
    }
  }

  // Okay, we have finally picked the most popular destination.
  return MostPopularDest;
}

bool JumpThreading::ProcessThreadableEdges(Instruction *CondInst,
                                           BasicBlock *BB) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;

  SmallVector<std::pair<ConstantInt*, BasicBlock*>, 8> PredValues;
  if (!ComputeValueKnownInPredecessors(CondInst, BB, PredValues))
    return false;
  assert(!PredValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  DEBUG(errs() << "IN BB: " << *BB;
        for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
          errs() << "  BB '" << BB->getName() << "': FOUND condition = ";
          if (PredValues[i].first)
            errs() << *PredValues[i].first;
          else
            errs() << "UNDEF";
          errs() << " for pred '" << PredValues[i].second->getName()
                 << "'.\n";
        });

  // Decide what we want to thread through.  Convert our list of known values to
  // a list of known destinations for each pred.  This also discards duplicate
  // predecessors and keeps track of the undefined inputs (which are represented
  // as a null dest in the PredToDestList).
  SmallPtrSet<BasicBlock*, 16> SeenPreds;
  SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;

  BasicBlock *OnlyDest = 0;
  BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;

  for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
    BasicBlock *Pred = PredValues[i].second;
    if (!SeenPreds.insert(Pred))
      continue;  // Duplicate predecessor entry.

    // If the predecessor ends with an indirect goto, we can't change its
    // destination.
    if (isa<IndirectBrInst>(Pred->getTerminator()))
      continue;

    ConstantInt *Val = PredValues[i].first;

    BasicBlock *DestBB;
    if (Val == 0)      // Undef.
      DestBB = 0;
    else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
      DestBB = BI->getSuccessor(Val->isZero());
    else {
      SwitchInst *SI = cast<SwitchInst>(BB->getTerminator());
      DestBB = SI->getSuccessor(SI->findCaseValue(Val));
    }

    // If we have exactly one destination, remember it for efficiency below.
    if (i == 0)
      OnlyDest = DestBB;
    else if (OnlyDest != DestBB)
      OnlyDest = MultipleDestSentinel;

    PredToDestList.push_back(std::make_pair(Pred, DestBB));
  }

  // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;

  // Determine which is the most common successor.  If we have many inputs and
  // this block is a switch, we want to start by threading the batch that goes
  // to the most popular destination first.  If we only know about one
  // threadable destination (the common case) we can avoid this.
  BasicBlock *MostPopularDest = OnlyDest;

  if (MostPopularDest == MultipleDestSentinel)
    MostPopularDest = FindMostPopularDest(BB, PredToDestList);

  // Now that we know what the most popular destination is, factor all
  // predecessors that will jump to it into a single predecessor.
  SmallVector<BasicBlock*, 16> PredsToFactor;
  for (unsigned i = 0, e = PredToDestList.size(); i != e; ++i)
    if (PredToDestList[i].second == MostPopularDest) {
      BasicBlock *Pred = PredToDestList[i].first;

      // This predecessor may be a switch or something else that has multiple
      // edges to the block.  Factor each of these edges by listing them
      // according to # occurrences in PredsToFactor.
      TerminatorInst *PredTI = Pred->getTerminator();
      for (unsigned i = 0, e = PredTI->getNumSuccessors(); i != e; ++i)
        if (PredTI->getSuccessor(i) == BB)
          PredsToFactor.push_back(Pred);
    }

  // If the threadable edges are branching on an undefined value, we get to pick
  // the destination that these predecessors should get to.
  if (MostPopularDest == 0)
    MostPopularDest = BB->getTerminator()->
                          getSuccessor(GetBestDestForJumpOnUndef(BB));

  // Ok, try to thread it!
  return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}

/// ProcessJumpOnPHI - We have a conditional branch or switch on a PHI node in
/// the current block.  See if there are any simplifications we can do based on
/// inputs to the phi node.
///
bool JumpThreading::ProcessJumpOnPHI(PHINode *PN) {
  BasicBlock *BB = PN->getParent();

  // If any of the predecessor blocks end in an unconditional branch, we can
  // *duplicate* the jump into that block in order to further encourage jump
  // threading and to eliminate cases where we have branch on a phi of an icmp
  // (branch on icmp is much better).

  // We don't want to do this transformation for switches, because we don't
  // really want to duplicate a switch.
  if (isa<SwitchInst>(BB->getTerminator()))
    return false;

  // Look for unconditional branch predecessors.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PredBB = PN->getIncomingBlock(i);
    if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
      if (PredBr->isUnconditional() &&
          // Try to duplicate BB into PredBB.
          DuplicateCondBranchOnPHIIntoPred(BB, PredBB))
        return true;
  }

  return false;
}


/// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
/// predecessor to the PHIBB block.  If it has PHI nodes, add entries for
/// NewPred using the entries from OldPred (suitably mapped).
static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
                                            BasicBlock *OldPred,
                                            BasicBlock *NewPred,
                                 DenseMap<Instruction*, Value*> &ValueMap) {
  for (BasicBlock::iterator PNI = PHIBB->begin();
       PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) {
    // Ok, we have a PHI node.  Figure out what the incoming value was for the
    // DestBlock.
    Value *IV = PN->getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN->addIncoming(IV, NewPred);
  }
}

/// ThreadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB.  Transform the IR to reflect this change.
bool JumpThreading::ThreadEdge(BasicBlock *BB,
                               const SmallVectorImpl<BasicBlock*> &PredBBs,
                               BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    DEBUG(errs() << "  Not threading across BB '" << BB->getName()
          << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(errs() << "  Not threading across loop header BB '" << BB->getName()
          << "' to dest BB '" << SuccBB->getName()
          << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned JumpThreadCost = getJumpThreadDuplicationCost(BB);
  if (JumpThreadCost > Threshold) {
    DEBUG(errs() << "  Not threading BB '" << BB->getName()
          << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }

  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(errs() << "  Factoring out " << PredBBs.size()
          << " common predecessors.\n");
    PredBB = SplitBlockPredecessors(BB, &PredBBs[0], PredBBs.size(),
                                    ".thr_comm", this);
  }

  // And finally, do it!
  DEBUG(errs() << "  Threading edge from '" << PredBB->getName() << "' to '"
        << SuccBB->getName() << "' with cost: " << JumpThreadCost
        << ", across block:\n    "
        << *BB << "\n");

  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'.  If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
                                         BB->getName()+".thread",
                                         BB->getParent(), BB);
  NewBB->moveAfter(PredBB);

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; !isa<TerminatorInst>(BI); ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    NewBB->getInstList().push_back(New);
    ValueMapping[BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  // We didn't copy the terminator from BB over to NewBB, because there is now
  // an unconditional jump to SuccBB.  Insert the unconditional jump.
  BranchInst::Create(SuccBB, NewBB);

  // Check to see if SuccBB has PHI nodes.  If so, we need to add entries to the
  // PHI nodes for NewBB now.
  AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);

  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value.  This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
         ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(UI) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&UI.getUse());
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(errs() << "JT: Renaming non-local uses of: " << *I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc.  See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I);
    SSAUpdate.AddAvailableValue(BB, I);
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(errs() << "\n");
  }


  // Ok, NewBB is good to go.  Update the terminator of PredBB to jump to
  // NewBB instead of BB.  This eliminates predecessors from BB, which requires
  // us to simplify any PHI nodes in BB.
  TerminatorInst *PredTerm = PredBB->getTerminator();
  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
    if (PredTerm->getSuccessor(i) == BB) {
      RemovePredecessorAndSimplify(BB, PredBB, TD);
      PredTerm->setSuccessor(i, NewBB);
    }

  // At this point, the IR is fully up to date and consistent.  Do a quick scan
  // over the new instructions and zap any that are constants or dead.  This
  // frequently happens because of phi translation.
  BI = NewBB->begin();
  for (BasicBlock::iterator E = NewBB->end(); BI != E; ) {
    Instruction *Inst = BI++;

    if (Value *V = SimplifyInstruction(Inst, TD)) {
      WeakVH BIHandle(BI);
      ReplaceAndSimplifyAllUses(Inst, V, TD);
      if (BIHandle == 0)
        BI = NewBB->begin();
      continue;
    }

    RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }

  // Threaded an edge!
  ++NumThreads;
  return true;
}

/// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
/// If we can duplicate the contents of BB up into PredBB, do so now; this
/// improves the odds that the branch will be on an analyzable instruction like
/// a compare.
bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
                                                     BasicBlock *PredBB) {
  // If BB is a loop header, then duplicating this block outside the loop would
  // cause us to transform this into an irreducible loop; don't do this.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(errs() << "  Not duplicating loop header '" << BB->getName()
          << "' into predecessor block '" << PredBB->getName()
          << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned DuplicationCost = getJumpThreadDuplicationCost(BB);
  if (DuplicationCost > Threshold) {
    DEBUG(errs() << "  Not duplicating BB '" << BB->getName()
          << "' - Cost is too high: " << DuplicationCost << "\n");
    return false;
  }

  // Okay, we decided to do this!  Clone all the instructions in BB onto the end
  // of PredBB.
  DEBUG(errs() << "  Duplicating block '" << BB->getName() << "' into end of '"
        << PredBB->getName() << "' to eliminate branch on phi.  Cost: "
        << DuplicationCost << " block is:" << *BB << "\n");

  // We are going to have to map operands from the original BB block into the
  // PredBB block.  Evaluate PHI nodes in BB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  BranchInst *OldPredBranch = cast<BranchInst>(PredBB->getTerminator());

  // Clone the non-phi instructions of BB into PredBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; BI != BB->end(); ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    PredBB->getInstList().insert(OldPredBranch, New);
    ValueMapping[BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  // Check to see if the targets of the branch had PHI nodes.  If so, we need to
  // add entries to the PHI nodes for branch from PredBB now.
  BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);

  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value.  This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
         ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(UI) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&UI.getUse());
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(errs() << "JT: Renaming non-local uses of: " << *I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc.  See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I);
    SSAUpdate.AddAvailableValue(BB, I);
    SSAUpdate.AddAvailableValue(PredBB, ValueMapping[I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(errs() << "\n");
  }

  // PredBB no longer jumps to BB; remove entries in the PHI nodes for the edge
  // that we nuked.
  RemovePredecessorAndSimplify(BB, PredBB, TD);

  // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();

  ++NumDupes;
  return true;
}