1 //===- JumpThreading.cpp - Thread control through conditional blocks ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the Jump Threading pass. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Transforms/Scalar/JumpThreading.h" 14 #include "llvm/ADT/DenseMap.h" 15 #include "llvm/ADT/DenseSet.h" 16 #include "llvm/ADT/MapVector.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/ADT/SmallPtrSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/BlockFrequencyInfo.h" 23 #include "llvm/Analysis/BranchProbabilityInfo.h" 24 #include "llvm/Analysis/CFG.h" 25 #include "llvm/Analysis/ConstantFolding.h" 26 #include "llvm/Analysis/GlobalsModRef.h" 27 #include "llvm/Analysis/GuardUtils.h" 28 #include "llvm/Analysis/InstructionSimplify.h" 29 #include "llvm/Analysis/LazyValueInfo.h" 30 #include "llvm/Analysis/Loads.h" 31 #include "llvm/Analysis/LoopInfo.h" 32 #include "llvm/Analysis/MemoryLocation.h" 33 #include "llvm/Analysis/PostDominators.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/Analysis/TargetTransformInfo.h" 36 #include "llvm/Analysis/ValueTracking.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CFG.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/ConstantRange.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugInfo.h" 44 #include "llvm/IR/Dominators.h" 45 #include "llvm/IR/Function.h" 46 #include "llvm/IR/InstrTypes.h" 47 #include "llvm/IR/Instruction.h" 48 #include "llvm/IR/Instructions.h" 49 #include "llvm/IR/IntrinsicInst.h" 50 #include "llvm/IR/Intrinsics.h" 51 #include "llvm/IR/LLVMContext.h" 52 #include "llvm/IR/MDBuilder.h" 53 #include "llvm/IR/Metadata.h" 54 #include "llvm/IR/Module.h" 55 #include "llvm/IR/PassManager.h" 56 #include "llvm/IR/PatternMatch.h" 57 #include "llvm/IR/ProfDataUtils.h" 58 #include "llvm/IR/Type.h" 59 #include "llvm/IR/Use.h" 60 #include "llvm/IR/Value.h" 61 #include "llvm/Support/BlockFrequency.h" 62 #include "llvm/Support/BranchProbability.h" 63 #include "llvm/Support/Casting.h" 64 #include "llvm/Support/CommandLine.h" 65 #include "llvm/Support/Debug.h" 66 #include "llvm/Support/raw_ostream.h" 67 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 68 #include "llvm/Transforms/Utils/Cloning.h" 69 #include "llvm/Transforms/Utils/Local.h" 70 #include "llvm/Transforms/Utils/SSAUpdater.h" 71 #include "llvm/Transforms/Utils/ValueMapper.h" 72 #include <algorithm> 73 #include <cassert> 74 #include <cstdint> 75 #include <iterator> 76 #include <memory> 77 #include <utility> 78 79 using namespace llvm; 80 using namespace jumpthreading; 81 82 #define DEBUG_TYPE "jump-threading" 83 84 STATISTIC(NumThreads, "Number of jumps threaded"); 85 STATISTIC(NumFolds, "Number of terminators folded"); 86 STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi"); 87 88 static cl::opt<unsigned> 89 BBDuplicateThreshold("jump-threading-threshold", 90 cl::desc("Max block size to duplicate for jump threading"), 91 cl::init(6), cl::Hidden); 92 93 static cl::opt<unsigned> 94 ImplicationSearchThreshold( 95 
"jump-threading-implication-search-threshold", 96 cl::desc("The number of predecessors to search for a stronger " 97 "condition to use to thread over a weaker condition"), 98 cl::init(3), cl::Hidden); 99 100 static cl::opt<unsigned> PhiDuplicateThreshold( 101 "jump-threading-phi-threshold", 102 cl::desc("Max PHIs in BB to duplicate for jump threading"), cl::init(76), 103 cl::Hidden); 104 105 static cl::opt<bool> PrintLVIAfterJumpThreading( 106 "print-lvi-after-jump-threading", 107 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false), 108 cl::Hidden); 109 110 static cl::opt<bool> ThreadAcrossLoopHeaders( 111 "jump-threading-across-loop-headers", 112 cl::desc("Allow JumpThreading to thread across loop headers, for testing"), 113 cl::init(false), cl::Hidden); 114 115 JumpThreadingPass::JumpThreadingPass(int T) { 116 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); 117 } 118 119 // Update branch probability information according to conditional 120 // branch probability. This is usually made possible for cloned branches 121 // in inline instances by the context specific profile in the caller. 122 // For instance, 123 // 124 // [Block PredBB] 125 // [Branch PredBr] 126 // if (t) { 127 // Block A; 128 // } else { 129 // Block B; 130 // } 131 // 132 // [Block BB] 133 // cond = PN([true, %A], [..., %B]); // PHI node 134 // [Branch CondBr] 135 // if (cond) { 136 // ... // P(cond == true) = 1% 137 // } 138 // 139 // Here we know that when block A is taken, cond must be true, which means 140 // P(cond == true | A) = 1 141 // 142 // Given that P(cond == true) = P(cond == true | A) * P(A) + 143 // P(cond == true | B) * P(B) 144 // we get: 145 // P(cond == true ) = P(A) + P(cond == true | B) * P(B) 146 // 147 // which gives us: 148 // P(A) is less than P(cond == true), i.e. 149 // P(t == true) <= P(cond == true) 150 // 151 // In other words, if we know P(cond == true) is unlikely, we know 152 // that P(t == true) is also unlikely. 153 // 154 static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) { 155 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 156 if (!CondBr) 157 return; 158 159 uint64_t TrueWeight, FalseWeight; 160 if (!extractBranchWeights(*CondBr, TrueWeight, FalseWeight)) 161 return; 162 163 if (TrueWeight + FalseWeight == 0) 164 // Zero branch_weights do not give a hint for getting branch probabilities. 165 // Technically it would result in division by zero denominator, which is 166 // TrueWeight + FalseWeight. 167 return; 168 169 // Returns the outgoing edge of the dominating predecessor block 170 // that leads to the PhiNode's incoming block: 171 auto GetPredOutEdge = 172 [](BasicBlock *IncomingBB, 173 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> { 174 auto *PredBB = IncomingBB; 175 auto *SuccBB = PhiBB; 176 SmallPtrSet<BasicBlock *, 16> Visited; 177 while (true) { 178 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); 179 if (PredBr && PredBr->isConditional()) 180 return {PredBB, SuccBB}; 181 Visited.insert(PredBB); 182 auto *SinglePredBB = PredBB->getSinglePredecessor(); 183 if (!SinglePredBB) 184 return {nullptr, nullptr}; 185 186 // Stop searching when SinglePredBB has been visited. It means we see 187 // an unreachable loop. 
188 if (Visited.count(SinglePredBB)) 189 return {nullptr, nullptr}; 190 191 SuccBB = PredBB; 192 PredBB = SinglePredBB; 193 } 194 }; 195 196 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 197 Value *PhiOpnd = PN->getIncomingValue(i); 198 ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd); 199 200 if (!CI || !CI->getType()->isIntegerTy(1)) 201 continue; 202 203 BranchProbability BP = 204 (CI->isOne() ? BranchProbability::getBranchProbability( 205 TrueWeight, TrueWeight + FalseWeight) 206 : BranchProbability::getBranchProbability( 207 FalseWeight, TrueWeight + FalseWeight)); 208 209 auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB); 210 if (!PredOutEdge.first) 211 return; 212 213 BasicBlock *PredBB = PredOutEdge.first; 214 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); 215 if (!PredBr) 216 return; 217 218 uint64_t PredTrueWeight, PredFalseWeight; 219 // FIXME: We currently only set the profile data when it is missing. 220 // With PGO, this can be used to refine even existing profile data with 221 // context information. This needs to be done after more performance 222 // testing. 223 if (extractBranchWeights(*PredBr, PredTrueWeight, PredFalseWeight)) 224 continue; 225 226 // We can not infer anything useful when BP >= 50%, because BP is the 227 // upper bound probability value. 228 if (BP >= BranchProbability(50, 100)) 229 continue; 230 231 SmallVector<uint32_t, 2> Weights; 232 if (PredBr->getSuccessor(0) == PredOutEdge.second) { 233 Weights.push_back(BP.getNumerator()); 234 Weights.push_back(BP.getCompl().getNumerator()); 235 } else { 236 Weights.push_back(BP.getCompl().getNumerator()); 237 Weights.push_back(BP.getNumerator()); 238 } 239 PredBr->setMetadata(LLVMContext::MD_prof, 240 MDBuilder(PredBr->getParent()->getContext()) 241 .createBranchWeights(Weights)); 242 } 243 } 244 245 PreservedAnalyses JumpThreadingPass::run(Function &F, 246 FunctionAnalysisManager &AM) { 247 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 248 // Jump Threading has no sense for the targets with divergent CF 249 if (TTI.hasBranchDivergence(&F)) 250 return PreservedAnalyses::all(); 251 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 252 auto &LVI = AM.getResult<LazyValueAnalysis>(F); 253 auto &AA = AM.getResult<AAManager>(F); 254 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 255 256 bool Changed = 257 runImpl(F, &AM, &TLI, &TTI, &LVI, &AA, 258 std::make_unique<DomTreeUpdater>( 259 &DT, nullptr, DomTreeUpdater::UpdateStrategy::Lazy), 260 std::nullopt, std::nullopt); 261 262 if (PrintLVIAfterJumpThreading) { 263 dbgs() << "LVI for function '" << F.getName() << "':\n"; 264 LVI.printLVI(F, getDomTreeUpdater()->getDomTree(), dbgs()); 265 } 266 267 if (!Changed) 268 return PreservedAnalyses::all(); 269 270 271 getDomTreeUpdater()->flush(); 272 273 #if defined(EXPENSIVE_CHECKS) 274 assert(getDomTreeUpdater()->getDomTree().verify( 275 DominatorTree::VerificationLevel::Full) && 276 "DT broken after JumpThreading"); 277 assert((!getDomTreeUpdater()->hasPostDomTree() || 278 getDomTreeUpdater()->getPostDomTree().verify( 279 PostDominatorTree::VerificationLevel::Full)) && 280 "PDT broken after JumpThreading"); 281 #else 282 assert(getDomTreeUpdater()->getDomTree().verify( 283 DominatorTree::VerificationLevel::Fast) && 284 "DT broken after JumpThreading"); 285 assert((!getDomTreeUpdater()->hasPostDomTree() || 286 getDomTreeUpdater()->getPostDomTree().verify( 287 PostDominatorTree::VerificationLevel::Fast)) && 288 "PDT broken after JumpThreading"); 289 #endif 290 291 
  return getPreservedAnalysis();
}

bool JumpThreadingPass::runImpl(Function &F_, FunctionAnalysisManager *FAM_,
                                TargetLibraryInfo *TLI_,
                                TargetTransformInfo *TTI_, LazyValueInfo *LVI_,
                                AliasAnalysis *AA_,
                                std::unique_ptr<DomTreeUpdater> DTU_,
                                std::optional<BlockFrequencyInfo *> BFI_,
                                std::optional<BranchProbabilityInfo *> BPI_) {
  LLVM_DEBUG(dbgs() << "Jump threading on function '" << F_.getName()
                    << "'\n");
  F = &F_;
  FAM = FAM_;
  TLI = TLI_;
  TTI = TTI_;
  LVI = LVI_;
  AA = AA_;
  DTU = std::move(DTU_);
  BFI = BFI_;
  BPI = BPI_;
  auto *GuardDecl = F->getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();

  // Reduce the number of instructions duplicated when optimizing strictly for
  // size.
  if (BBDuplicateThreshold.getNumOccurrences())
    BBDupThreshold = BBDuplicateThreshold;
  else if (F->hasFnAttribute(Attribute::MinSize))
    BBDupThreshold = 3;
  else
    BBDupThreshold = DefaultBBDupThreshold;

  // JumpThreading must not process blocks unreachable from entry. It's a
  // waste of compute time and can potentially lead to hangs.
  SmallPtrSet<BasicBlock *, 16> Unreachable;
  assert(DTU && "DTU isn't passed into JumpThreading before using it.");
  assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
  DominatorTree &DT = DTU->getDomTree();
  for (auto &BB : *F)
    if (!DT.isReachableFromEntry(&BB))
      Unreachable.insert(&BB);

  if (!ThreadAcrossLoopHeaders)
    findLoopHeaders(*F);

  bool EverChanged = false;
  bool Changed;
  do {
    Changed = false;
    for (auto &BB : *F) {
      if (Unreachable.count(&BB))
        continue;
      while (processBlock(&BB)) // Thread all of the branches we can over BB.
        Changed = ChangedSinceLastAnalysisUpdate = true;

      // Jump threading may have introduced redundant debug values into BB
      // which should be removed.
      if (Changed)
        RemoveRedundantDbgInstrs(&BB);

      // Stop processing BB if it's the entry or is now deleted. The following
      // routines attempt to eliminate BB, and locating a suitable replacement
      // for the entry is non-trivial.
      if (&BB == &F->getEntryBlock() || DTU->isBBPendingDeletion(&BB))
        continue;

      if (pred_empty(&BB)) {
        // When processBlock makes BB unreachable it doesn't bother to fix up
        // the instructions in it. We must remove BB to prevent invalid IR.
        LLVM_DEBUG(dbgs() << "  JT: Deleting dead block '" << BB.getName()
                          << "' with terminator: " << *BB.getTerminator()
                          << '\n');
        LoopHeaders.erase(&BB);
        LVI->eraseBlock(&BB);
        DeleteDeadBlock(&BB, DTU.get());
        Changed = ChangedSinceLastAnalysisUpdate = true;
        continue;
      }

      // processBlock doesn't thread BBs with unconditional TIs. However, if BB
      // is "almost empty", we attempt to merge BB with its sole successor.
      auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
      if (BI && BI->isUnconditional()) {
        BasicBlock *Succ = BI->getSuccessor(0);
        if (
            // The terminator must be the only non-phi instruction in BB.
            BB.getFirstNonPHIOrDbg(true)->isTerminator() &&
            // Don't alter Loop headers and latches to ensure another pass can
            // detect and transform nested loops later.
            !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) &&
            TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU.get())) {
          RemoveRedundantDbgInstrs(Succ);
          // BB is valid for cleanup here because we passed in DTU. F remains
          // BB's parent until a DTU->getDomTree() event.
          LVI->eraseBlock(&BB);
          Changed = ChangedSinceLastAnalysisUpdate = true;
        }
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

// Replace uses of Cond with ToVal when safe to do so. If all uses are
// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
// because we may incorrectly replace uses when guards/assumes are uses of
// `Cond` and we used the guards/assumes to reason about the `Cond` value
// at the end of the block. RAUW unconditionally replaces all uses
// including the guards/assumes themselves and the uses before the
// guard/assume.
static bool replaceFoldableUses(Instruction *Cond, Value *ToVal,
                                BasicBlock *KnownAtEndOfBB) {
  bool Changed = false;
  assert(Cond->getType() == ToVal->getType());
  // We can unconditionally replace all uses in non-local blocks (i.e. uses
  // strictly dominated by BB), since LVI information is true from the
  // terminator of BB.
  if (Cond->getParent() == KnownAtEndOfBB)
    Changed |= replaceNonLocalUsesWith(Cond, ToVal);
  for (Instruction &I : reverse(*KnownAtEndOfBB)) {
    // Reached the Cond whose uses we are trying to replace, so there are no
    // more uses.
    if (&I == Cond)
      break;
    // We only replace uses in instructions that are guaranteed to reach the
    // end of BB, where we know Cond is ToVal.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
    Changed |= I.replaceUsesOfWith(Cond, ToVal);
  }
  if (Cond->use_empty() && !Cond->mayHaveSideEffects()) {
    Cond->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Return the cost of duplicating a piece of this block, from the first
/// non-phi instruction up to (but not including) the StopAt instruction, in
/// order to thread across it. Stop scanning the block once the threshold is
/// exceeded. If duplication is impossible, returns ~0U.
static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI,
                                             BasicBlock *BB,
                                             Instruction *StopAt,
                                             unsigned Threshold) {
  assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");

  // Do not duplicate the BB if it has a lot of PHI nodes.
  // If a threadable chain is too long then the number of PHI nodes can add up,
  // leading to a substantial increase in compile time when rewriting the SSA.
  unsigned PhiCount = 0;
  Instruction *FirstNonPHI = nullptr;
  for (Instruction &I : *BB) {
    if (!isa<PHINode>(&I)) {
      FirstNonPHI = &I;
      break;
    }
    if (++PhiCount > PhiDuplicateThreshold)
      return ~0U;
  }

  // Ignore PHI nodes; these will be flattened when duplication happens.
  BasicBlock::const_iterator I(FirstNonPHI);

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  if (BB->getTerminator() == StopAt) {
    // Threading through a switch statement is particularly profitable. If this
    // block ends in a switch, decrease its cost to make it more likely to
    // happen.
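    // As a rough illustration of the bookkeeping below: the bonus is added to
    // the scan threshold up front and subtracted from the final size, so with
    // the default -jump-threading-threshold of 6 and a switch terminator
    // (bonus 6), a block whose raw instruction count is 10 reports a cost of
    // 4 rather than 10.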
466 if (isa<SwitchInst>(StopAt)) 467 Bonus = 6; 468 469 // The same holds for indirect branches, but slightly more so. 470 if (isa<IndirectBrInst>(StopAt)) 471 Bonus = 8; 472 } 473 474 // Bump the threshold up so the early exit from the loop doesn't skip the 475 // terminator-based Size adjustment at the end. 476 Threshold += Bonus; 477 478 // Sum up the cost of each instruction until we get to the terminator. Don't 479 // include the terminator because the copy won't include it. 480 unsigned Size = 0; 481 for (; &*I != StopAt; ++I) { 482 483 // Stop scanning the block if we've reached the threshold. 484 if (Size > Threshold) 485 return Size; 486 487 // Bail out if this instruction gives back a token type, it is not possible 488 // to duplicate it if it is used outside this BB. 489 if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB)) 490 return ~0U; 491 492 // Blocks with NoDuplicate are modelled as having infinite cost, so they 493 // are never duplicated. 494 if (const CallInst *CI = dyn_cast<CallInst>(I)) 495 if (CI->cannotDuplicate() || CI->isConvergent()) 496 return ~0U; 497 498 if (TTI->getInstructionCost(&*I, TargetTransformInfo::TCK_SizeAndLatency) == 499 TargetTransformInfo::TCC_Free) 500 continue; 501 502 // All other instructions count for at least one unit. 503 ++Size; 504 505 // Calls are more expensive. If they are non-intrinsic calls, we model them 506 // as having cost of 4. If they are a non-vector intrinsic, we model them 507 // as having cost of 2 total, and if they are a vector intrinsic, we model 508 // them as having cost 1. 509 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 510 if (!isa<IntrinsicInst>(CI)) 511 Size += 3; 512 else if (!CI->getType()->isVectorTy()) 513 Size += 1; 514 } 515 } 516 517 return Size > Bonus ? Size - Bonus : 0; 518 } 519 520 /// findLoopHeaders - We do not want jump threading to turn proper loop 521 /// structures into irreducible loops. Doing this breaks up the loop nesting 522 /// hierarchy and pessimizes later transformations. To prevent this from 523 /// happening, we first have to find the loop headers. Here we approximate this 524 /// by finding targets of backedges in the CFG. 525 /// 526 /// Note that there definitely are cases when we want to allow threading of 527 /// edges across a loop header. For example, threading a jump from outside the 528 /// loop (the preheader) to an exit block of the loop is definitely profitable. 529 /// It is also almost always profitable to thread backedges from within the loop 530 /// to exit blocks, and is often profitable to thread backedges to other blocks 531 /// within the loop (forming a nested loop). This simple analysis is not rich 532 /// enough to track all of these properties and keep it up-to-date as the CFG 533 /// mutates, so we don't allow any of these transformations. 534 void JumpThreadingPass::findLoopHeaders(Function &F) { 535 SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges; 536 FindFunctionBackedges(F, Edges); 537 538 for (const auto &Edge : Edges) 539 LoopHeaders.insert(Edge.second); 540 } 541 542 /// getKnownConstant - Helper method to determine if we can thread over a 543 /// terminator with the given value as its condition, and if so what value to 544 /// use for that. What kind of value this is depends on whether we want an 545 /// integer or a block address, but an undef is always accepted. 546 /// Returns null if Val is null or not an appropriate constant. 
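/// For example, when threading a conditional branch or a switch the caller
/// asks for WantInteger, so a ConstantInt (or undef) qualifies; when threading
/// an indirectbr it asks for WantBlockAddress, so a BlockAddress, possibly
/// hidden behind pointer casts, qualifies.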
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// computeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors. If so, return the list of known (value,
/// predecessor BB) pairs in the result vector.
///
/// This returns true if there were any known values.
bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, DenseSet<Value *> &RecursionSet,
    Instruction *CxtI) {
  const DataLayout &DL = BB->getModule()->getDataLayout();

  // This method walks up use-def chains recursively. Because of this, we could
  // get into an infinite loop going around loops in the use-def chain. To
  // prevent this, keep track of the values we have already visited and
  // terminate the search if we loop back to one of them.
  if (!RecursionSet.insert(V).second)
    return false;

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.emplace_back(KC, Pred);

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value on any
    // edge from our predecessors.
    for (BasicBlock *P : predecessors(BB)) {
      using namespace PatternMatch;
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      // If I is a non-local compare-with-constant instruction, use the richer
      // 'getPredicateOnEdge' method. This handles value inequalities better,
      // for example when the compare is "X < 4" and "X < 3" is known to be
      // true on the edge even though a constant for "X < 4" itself is not
      // available.
      CmpInst::Predicate Pred;
      Value *Val;
      Constant *Cst;
      if (!PredCst && match(V, m_Cmp(Pred, m_Value(Val), m_Constant(Cst)))) {
        auto Res = LVI->getPredicateOnEdge(Pred, Val, Cst, P, BB, CxtI);
        if (Res != LazyValueInfo::Unknown)
          PredCst = ConstantInt::getBool(V->getContext(), Res);
      }
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.emplace_back(KC, P);
    }

    return !Result.empty();
  }

  // If I is a PHI node, then we know the incoming values for any constants.
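  // As an illustration (hypothetical IR, not from this file): given
  //   %cond = phi i1 [ true, %a ], [ %x, %b ]
  // the value is trivially known to be 'true' on the edge from %a, and LVI
  // may still prove a constant for %x on the edge from %b into this block.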
620 if (PHINode *PN = dyn_cast<PHINode>(I)) { 621 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 622 Value *InVal = PN->getIncomingValue(i); 623 if (Constant *KC = getKnownConstant(InVal, Preference)) { 624 Result.emplace_back(KC, PN->getIncomingBlock(i)); 625 } else { 626 Constant *CI = LVI->getConstantOnEdge(InVal, 627 PN->getIncomingBlock(i), 628 BB, CxtI); 629 if (Constant *KC = getKnownConstant(CI, Preference)) 630 Result.emplace_back(KC, PN->getIncomingBlock(i)); 631 } 632 } 633 634 return !Result.empty(); 635 } 636 637 // Handle Cast instructions. 638 if (CastInst *CI = dyn_cast<CastInst>(I)) { 639 Value *Source = CI->getOperand(0); 640 PredValueInfoTy Vals; 641 computeValueKnownInPredecessorsImpl(Source, BB, Vals, Preference, 642 RecursionSet, CxtI); 643 if (Vals.empty()) 644 return false; 645 646 // Convert the known values. 647 for (auto &Val : Vals) 648 if (Constant *Folded = ConstantFoldCastOperand(CI->getOpcode(), Val.first, 649 CI->getType(), DL)) 650 Result.emplace_back(Folded, Val.second); 651 652 return !Result.empty(); 653 } 654 655 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) { 656 Value *Source = FI->getOperand(0); 657 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference, 658 RecursionSet, CxtI); 659 660 erase_if(Result, [](auto &Pair) { 661 return !isGuaranteedNotToBeUndefOrPoison(Pair.first); 662 }); 663 664 return !Result.empty(); 665 } 666 667 // Handle some boolean conditions. 668 if (I->getType()->getPrimitiveSizeInBits() == 1) { 669 using namespace PatternMatch; 670 if (Preference != WantInteger) 671 return false; 672 // X | true -> true 673 // X & false -> false 674 Value *Op0, *Op1; 675 if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) || 676 match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { 677 PredValueInfoTy LHSVals, RHSVals; 678 679 computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger, 680 RecursionSet, CxtI); 681 computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger, 682 RecursionSet, CxtI); 683 684 if (LHSVals.empty() && RHSVals.empty()) 685 return false; 686 687 ConstantInt *InterestingVal; 688 if (match(I, m_LogicalOr())) 689 InterestingVal = ConstantInt::getTrue(I->getContext()); 690 else 691 InterestingVal = ConstantInt::getFalse(I->getContext()); 692 693 SmallPtrSet<BasicBlock*, 4> LHSKnownBBs; 694 695 // Scan for the sentinel. If we find an undef, force it to the 696 // interesting value: x|undef -> true and x&undef -> false. 697 for (const auto &LHSVal : LHSVals) 698 if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) { 699 Result.emplace_back(InterestingVal, LHSVal.second); 700 LHSKnownBBs.insert(LHSVal.second); 701 } 702 for (const auto &RHSVal : RHSVals) 703 if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) { 704 // If we already inferred a value for this block on the LHS, don't 705 // re-add it. 706 if (!LHSKnownBBs.count(RHSVal.second)) 707 Result.emplace_back(InterestingVal, RHSVal.second); 708 } 709 710 return !Result.empty(); 711 } 712 713 // Handle the NOT form of XOR. 714 if (I->getOpcode() == Instruction::Xor && 715 isa<ConstantInt>(I->getOperand(1)) && 716 cast<ConstantInt>(I->getOperand(1))->isOne()) { 717 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result, 718 WantInteger, RecursionSet, CxtI); 719 if (Result.empty()) 720 return false; 721 722 // Invert the known values. 
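      // E.g. an operand known to be 'true' in some predecessor yields a known
      // 'false' for this xor-with-1 in that same predecessor.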
723 for (auto &R : Result) 724 R.first = ConstantExpr::getNot(R.first); 725 726 return true; 727 } 728 729 // Try to simplify some other binary operator values. 730 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { 731 if (Preference != WantInteger) 732 return false; 733 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { 734 PredValueInfoTy LHSVals; 735 computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals, 736 WantInteger, RecursionSet, CxtI); 737 738 // Try to use constant folding to simplify the binary operator. 739 for (const auto &LHSVal : LHSVals) { 740 Constant *V = LHSVal.first; 741 Constant *Folded = 742 ConstantFoldBinaryOpOperands(BO->getOpcode(), V, CI, DL); 743 744 if (Constant *KC = getKnownConstant(Folded, WantInteger)) 745 Result.emplace_back(KC, LHSVal.second); 746 } 747 } 748 749 return !Result.empty(); 750 } 751 752 // Handle compare with phi operand, where the PHI is defined in this block. 753 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) { 754 if (Preference != WantInteger) 755 return false; 756 Type *CmpType = Cmp->getType(); 757 Value *CmpLHS = Cmp->getOperand(0); 758 Value *CmpRHS = Cmp->getOperand(1); 759 CmpInst::Predicate Pred = Cmp->getPredicate(); 760 761 PHINode *PN = dyn_cast<PHINode>(CmpLHS); 762 if (!PN) 763 PN = dyn_cast<PHINode>(CmpRHS); 764 // Do not perform phi translation across a loop header phi, because this 765 // may result in comparison of values from two different loop iterations. 766 // FIXME: This check is broken if LoopHeaders is not populated. 767 if (PN && PN->getParent() == BB && !LoopHeaders.contains(BB)) { 768 const DataLayout &DL = PN->getModule()->getDataLayout(); 769 // We can do this simplification if any comparisons fold to true or false. 770 // See if any do. 771 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 772 BasicBlock *PredBB = PN->getIncomingBlock(i); 773 Value *LHS, *RHS; 774 if (PN == CmpLHS) { 775 LHS = PN->getIncomingValue(i); 776 RHS = CmpRHS->DoPHITranslation(BB, PredBB); 777 } else { 778 LHS = CmpLHS->DoPHITranslation(BB, PredBB); 779 RHS = PN->getIncomingValue(i); 780 } 781 Value *Res = simplifyCmpInst(Pred, LHS, RHS, {DL}); 782 if (!Res) { 783 if (!isa<Constant>(RHS)) 784 continue; 785 786 // getPredicateOnEdge call will make no sense if LHS is defined in BB. 787 auto LHSInst = dyn_cast<Instruction>(LHS); 788 if (LHSInst && LHSInst->getParent() == BB) 789 continue; 790 791 LazyValueInfo::Tristate 792 ResT = LVI->getPredicateOnEdge(Pred, LHS, 793 cast<Constant>(RHS), PredBB, BB, 794 CxtI ? CxtI : Cmp); 795 if (ResT == LazyValueInfo::Unknown) 796 continue; 797 Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT); 798 } 799 800 if (Constant *KC = getKnownConstant(Res, WantInteger)) 801 Result.emplace_back(KC, PredBB); 802 } 803 804 return !Result.empty(); 805 } 806 807 // If comparing a live-in value against a constant, see if we know the 808 // live-in value on any predecessors. 809 if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) { 810 Constant *CmpConst = cast<Constant>(CmpRHS); 811 812 if (!isa<Instruction>(CmpLHS) || 813 cast<Instruction>(CmpLHS)->getParent() != BB) { 814 for (BasicBlock *P : predecessors(BB)) { 815 // If the value is known by LazyValueInfo to be a constant in a 816 // predecessor, use that information to try to thread this block. 817 LazyValueInfo::Tristate Res = 818 LVI->getPredicateOnEdge(Pred, CmpLHS, 819 CmpConst, P, BB, CxtI ? 
CxtI : Cmp); 820 if (Res == LazyValueInfo::Unknown) 821 continue; 822 823 Constant *ResC = ConstantInt::get(CmpType, Res); 824 Result.emplace_back(ResC, P); 825 } 826 827 return !Result.empty(); 828 } 829 830 // InstCombine can fold some forms of constant range checks into 831 // (icmp (add (x, C1)), C2). See if we have we have such a thing with 832 // x as a live-in. 833 { 834 using namespace PatternMatch; 835 836 Value *AddLHS; 837 ConstantInt *AddConst; 838 if (isa<ConstantInt>(CmpConst) && 839 match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) { 840 if (!isa<Instruction>(AddLHS) || 841 cast<Instruction>(AddLHS)->getParent() != BB) { 842 for (BasicBlock *P : predecessors(BB)) { 843 // If the value is known by LazyValueInfo to be a ConstantRange in 844 // a predecessor, use that information to try to thread this 845 // block. 846 ConstantRange CR = LVI->getConstantRangeOnEdge( 847 AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS)); 848 // Propagate the range through the addition. 849 CR = CR.add(AddConst->getValue()); 850 851 // Get the range where the compare returns true. 852 ConstantRange CmpRange = ConstantRange::makeExactICmpRegion( 853 Pred, cast<ConstantInt>(CmpConst)->getValue()); 854 855 Constant *ResC; 856 if (CmpRange.contains(CR)) 857 ResC = ConstantInt::getTrue(CmpType); 858 else if (CmpRange.inverse().contains(CR)) 859 ResC = ConstantInt::getFalse(CmpType); 860 else 861 continue; 862 863 Result.emplace_back(ResC, P); 864 } 865 866 return !Result.empty(); 867 } 868 } 869 } 870 871 // Try to find a constant value for the LHS of a comparison, 872 // and evaluate it statically if we can. 873 PredValueInfoTy LHSVals; 874 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals, 875 WantInteger, RecursionSet, CxtI); 876 877 for (const auto &LHSVal : LHSVals) { 878 Constant *V = LHSVal.first; 879 Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst); 880 if (Constant *KC = getKnownConstant(Folded, WantInteger)) 881 Result.emplace_back(KC, LHSVal.second); 882 } 883 884 return !Result.empty(); 885 } 886 } 887 888 if (SelectInst *SI = dyn_cast<SelectInst>(I)) { 889 // Handle select instructions where at least one operand is a known constant 890 // and we can figure out the condition value for any predecessor block. 891 Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference); 892 Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference); 893 PredValueInfoTy Conds; 894 if ((TrueVal || FalseVal) && 895 computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds, 896 WantInteger, RecursionSet, CxtI)) { 897 for (auto &C : Conds) { 898 Constant *Cond = C.first; 899 900 // Figure out what value to use for the condition. 901 bool KnownCond; 902 if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) { 903 // A known boolean. 904 KnownCond = CI->isOne(); 905 } else { 906 assert(isa<UndefValue>(Cond) && "Unexpected condition value"); 907 // Either operand will do, so be sure to pick the one that's a known 908 // constant. 909 // FIXME: Do this more cleverly if both values are known constants? 910 KnownCond = (TrueVal != nullptr); 911 } 912 913 // See if the select has a known constant value for this predecessor. 914 if (Constant *Val = KnownCond ? TrueVal : FalseVal) 915 Result.emplace_back(Val, C.second); 916 } 917 918 return !Result.empty(); 919 } 920 } 921 922 // If all else fails, see if LVI can figure out a constant value for us. 
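  // Note that this queries LVI at CxtI within BB itself; if it proves a
  // constant, the same value is recorded for every predecessor edge below.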
923 assert(CxtI->getParent() == BB && "CxtI should be in BB"); 924 Constant *CI = LVI->getConstant(V, CxtI); 925 if (Constant *KC = getKnownConstant(CI, Preference)) { 926 for (BasicBlock *Pred : predecessors(BB)) 927 Result.emplace_back(KC, Pred); 928 } 929 930 return !Result.empty(); 931 } 932 933 /// GetBestDestForBranchOnUndef - If we determine that the specified block ends 934 /// in an undefined jump, decide which block is best to revector to. 935 /// 936 /// Since we can pick an arbitrary destination, we pick the successor with the 937 /// fewest predecessors. This should reduce the in-degree of the others. 938 static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) { 939 Instruction *BBTerm = BB->getTerminator(); 940 unsigned MinSucc = 0; 941 BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc); 942 // Compute the successor with the minimum number of predecessors. 943 unsigned MinNumPreds = pred_size(TestBB); 944 for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) { 945 TestBB = BBTerm->getSuccessor(i); 946 unsigned NumPreds = pred_size(TestBB); 947 if (NumPreds < MinNumPreds) { 948 MinSucc = i; 949 MinNumPreds = NumPreds; 950 } 951 } 952 953 return MinSucc; 954 } 955 956 static bool hasAddressTakenAndUsed(BasicBlock *BB) { 957 if (!BB->hasAddressTaken()) return false; 958 959 // If the block has its address taken, it may be a tree of dead constants 960 // hanging off of it. These shouldn't keep the block alive. 961 BlockAddress *BA = BlockAddress::get(BB); 962 BA->removeDeadConstantUsers(); 963 return !BA->use_empty(); 964 } 965 966 /// processBlock - If there are any predecessors whose control can be threaded 967 /// through to a successor, transform them now. 968 bool JumpThreadingPass::processBlock(BasicBlock *BB) { 969 // If the block is trivially dead, just return and let the caller nuke it. 970 // This simplifies other transformations. 971 if (DTU->isBBPendingDeletion(BB) || 972 (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock())) 973 return false; 974 975 // If this block has a single predecessor, and if that pred has a single 976 // successor, merge the blocks. This encourages recursive jump threading 977 // because now the condition in this block can be threaded through 978 // predecessors of our predecessor block. 979 if (maybeMergeBasicBlockIntoOnlyPred(BB)) 980 return true; 981 982 if (tryToUnfoldSelectInCurrBB(BB)) 983 return true; 984 985 // Look if we can propagate guards to predecessors. 986 if (HasGuards && processGuards(BB)) 987 return true; 988 989 // What kind of constant we're looking for. 990 ConstantPreference Preference = WantInteger; 991 992 // Look to see if the terminator is a conditional branch, switch or indirect 993 // branch, if not we can't thread it. 994 Value *Condition; 995 Instruction *Terminator = BB->getTerminator(); 996 if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) { 997 // Can't thread an unconditional jump. 998 if (BI->isUnconditional()) return false; 999 Condition = BI->getCondition(); 1000 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) { 1001 Condition = SI->getCondition(); 1002 } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) { 1003 // Can't thread indirect branch with no successors. 1004 if (IB->getNumSuccessors() == 0) return false; 1005 Condition = IB->getAddress()->stripPointerCasts(); 1006 Preference = WantBlockAddress; 1007 } else { 1008 return false; // Must be an invoke or callbr. 
1009 } 1010 1011 // Keep track if we constant folded the condition in this invocation. 1012 bool ConstantFolded = false; 1013 1014 // Run constant folding to see if we can reduce the condition to a simple 1015 // constant. 1016 if (Instruction *I = dyn_cast<Instruction>(Condition)) { 1017 Value *SimpleVal = 1018 ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI); 1019 if (SimpleVal) { 1020 I->replaceAllUsesWith(SimpleVal); 1021 if (isInstructionTriviallyDead(I, TLI)) 1022 I->eraseFromParent(); 1023 Condition = SimpleVal; 1024 ConstantFolded = true; 1025 } 1026 } 1027 1028 // If the terminator is branching on an undef or freeze undef, we can pick any 1029 // of the successors to branch to. Let getBestDestForJumpOnUndef decide. 1030 auto *FI = dyn_cast<FreezeInst>(Condition); 1031 if (isa<UndefValue>(Condition) || 1032 (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) { 1033 unsigned BestSucc = getBestDestForJumpOnUndef(BB); 1034 std::vector<DominatorTree::UpdateType> Updates; 1035 1036 // Fold the branch/switch. 1037 Instruction *BBTerm = BB->getTerminator(); 1038 Updates.reserve(BBTerm->getNumSuccessors()); 1039 for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) { 1040 if (i == BestSucc) continue; 1041 BasicBlock *Succ = BBTerm->getSuccessor(i); 1042 Succ->removePredecessor(BB, true); 1043 Updates.push_back({DominatorTree::Delete, BB, Succ}); 1044 } 1045 1046 LLVM_DEBUG(dbgs() << " In block '" << BB->getName() 1047 << "' folding undef terminator: " << *BBTerm << '\n'); 1048 BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm); 1049 ++NumFolds; 1050 BBTerm->eraseFromParent(); 1051 DTU->applyUpdatesPermissive(Updates); 1052 if (FI) 1053 FI->eraseFromParent(); 1054 return true; 1055 } 1056 1057 // If the terminator of this block is branching on a constant, simplify the 1058 // terminator to an unconditional branch. This can occur due to threading in 1059 // other blocks. 1060 if (getKnownConstant(Condition, Preference)) { 1061 LLVM_DEBUG(dbgs() << " In block '" << BB->getName() 1062 << "' folding terminator: " << *BB->getTerminator() 1063 << '\n'); 1064 ++NumFolds; 1065 ConstantFoldTerminator(BB, true, nullptr, DTU.get()); 1066 if (auto *BPI = getBPI()) 1067 BPI->eraseBlock(BB); 1068 return true; 1069 } 1070 1071 Instruction *CondInst = dyn_cast<Instruction>(Condition); 1072 1073 // All the rest of our checks depend on the condition being an instruction. 1074 if (!CondInst) { 1075 // FIXME: Unify this with code below. 1076 if (processThreadableEdges(Condition, BB, Preference, Terminator)) 1077 return true; 1078 return ConstantFolded; 1079 } 1080 1081 // Some of the following optimization can safely work on the unfrozen cond. 1082 Value *CondWithoutFreeze = CondInst; 1083 if (auto *FI = dyn_cast<FreezeInst>(CondInst)) 1084 CondWithoutFreeze = FI->getOperand(0); 1085 1086 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondWithoutFreeze)) { 1087 // If we're branching on a conditional, LVI might be able to determine 1088 // it's value at the branch instruction. We only handle comparisons 1089 // against a constant at this time. 1090 if (Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1))) { 1091 LazyValueInfo::Tristate Ret = 1092 LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0), 1093 CondConst, BB->getTerminator(), 1094 /*UseBlockValue=*/false); 1095 if (Ret != LazyValueInfo::Unknown) { 1096 // We can safely replace *some* uses of the CondInst if it has 1097 // exactly one value as returned by LVI. 
RAUW is incorrect in the 1098 // presence of guards and assumes, that have the `Cond` as the use. This 1099 // is because we use the guards/assume to reason about the `Cond` value 1100 // at the end of block, but RAUW unconditionally replaces all uses 1101 // including the guards/assumes themselves and the uses before the 1102 // guard/assume. 1103 auto *CI = Ret == LazyValueInfo::True ? 1104 ConstantInt::getTrue(CondCmp->getType()) : 1105 ConstantInt::getFalse(CondCmp->getType()); 1106 if (replaceFoldableUses(CondCmp, CI, BB)) 1107 return true; 1108 } 1109 1110 // We did not manage to simplify this branch, try to see whether 1111 // CondCmp depends on a known phi-select pattern. 1112 if (tryToUnfoldSelect(CondCmp, BB)) 1113 return true; 1114 } 1115 } 1116 1117 if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) 1118 if (tryToUnfoldSelect(SI, BB)) 1119 return true; 1120 1121 // Check for some cases that are worth simplifying. Right now we want to look 1122 // for loads that are used by a switch or by the condition for the branch. If 1123 // we see one, check to see if it's partially redundant. If so, insert a PHI 1124 // which can then be used to thread the values. 1125 Value *SimplifyValue = CondWithoutFreeze; 1126 1127 if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue)) 1128 if (isa<Constant>(CondCmp->getOperand(1))) 1129 SimplifyValue = CondCmp->getOperand(0); 1130 1131 // TODO: There are other places where load PRE would be profitable, such as 1132 // more complex comparisons. 1133 if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue)) 1134 if (simplifyPartiallyRedundantLoad(LoadI)) 1135 return true; 1136 1137 // Before threading, try to propagate profile data backwards: 1138 if (PHINode *PN = dyn_cast<PHINode>(CondInst)) 1139 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1140 updatePredecessorProfileMetadata(PN, BB); 1141 1142 // Handle a variety of cases where we are branching on something derived from 1143 // a PHI node in the current block. If we can prove that any predecessors 1144 // compute a predictable value based on a PHI node, thread those predecessors. 1145 if (processThreadableEdges(CondInst, BB, Preference, Terminator)) 1146 return true; 1147 1148 // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in 1149 // the current block, see if we can simplify. 1150 PHINode *PN = dyn_cast<PHINode>(CondWithoutFreeze); 1151 if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1152 return processBranchOnPHI(PN); 1153 1154 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify. 1155 if (CondInst->getOpcode() == Instruction::Xor && 1156 CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1157 return processBranchOnXOR(cast<BinaryOperator>(CondInst)); 1158 1159 // Search for a stronger dominating condition that can be used to simplify a 1160 // conditional branch leaving BB. 1161 if (processImpliedCondition(BB)) 1162 return true; 1163 1164 return false; 1165 } 1166 1167 bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) { 1168 auto *BI = dyn_cast<BranchInst>(BB->getTerminator()); 1169 if (!BI || !BI->isConditional()) 1170 return false; 1171 1172 Value *Cond = BI->getCondition(); 1173 // Assuming that predecessor's branch was taken, if pred's branch condition 1174 // (V) implies Cond, Cond can be either true, undef, or poison. In this case, 1175 // freeze(Cond) is either true or a nondeterministic value. 
1176 // If freeze(Cond) has only one use, we can freely fold freeze(Cond) to true 1177 // without affecting other instructions. 1178 auto *FICond = dyn_cast<FreezeInst>(Cond); 1179 if (FICond && FICond->hasOneUse()) 1180 Cond = FICond->getOperand(0); 1181 else 1182 FICond = nullptr; 1183 1184 BasicBlock *CurrentBB = BB; 1185 BasicBlock *CurrentPred = BB->getSinglePredecessor(); 1186 unsigned Iter = 0; 1187 1188 auto &DL = BB->getModule()->getDataLayout(); 1189 1190 while (CurrentPred && Iter++ < ImplicationSearchThreshold) { 1191 auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator()); 1192 if (!PBI || !PBI->isConditional()) 1193 return false; 1194 if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB) 1195 return false; 1196 1197 bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB; 1198 std::optional<bool> Implication = 1199 isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue); 1200 1201 // If the branch condition of BB (which is Cond) and CurrentPred are 1202 // exactly the same freeze instruction, Cond can be folded into CondIsTrue. 1203 if (!Implication && FICond && isa<FreezeInst>(PBI->getCondition())) { 1204 if (cast<FreezeInst>(PBI->getCondition())->getOperand(0) == 1205 FICond->getOperand(0)) 1206 Implication = CondIsTrue; 1207 } 1208 1209 if (Implication) { 1210 BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1); 1211 BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0); 1212 RemoveSucc->removePredecessor(BB); 1213 BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI); 1214 UncondBI->setDebugLoc(BI->getDebugLoc()); 1215 ++NumFolds; 1216 BI->eraseFromParent(); 1217 if (FICond) 1218 FICond->eraseFromParent(); 1219 1220 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}}); 1221 if (auto *BPI = getBPI()) 1222 BPI->eraseBlock(BB); 1223 return true; 1224 } 1225 CurrentBB = CurrentPred; 1226 CurrentPred = CurrentBB->getSinglePredecessor(); 1227 } 1228 1229 return false; 1230 } 1231 1232 /// Return true if Op is an instruction defined in the given block. 1233 static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) { 1234 if (Instruction *OpInst = dyn_cast<Instruction>(Op)) 1235 if (OpInst->getParent() == BB) 1236 return true; 1237 return false; 1238 } 1239 1240 /// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially 1241 /// redundant load instruction, eliminate it by replacing it with a PHI node. 1242 /// This is an important optimization that encourages jump threading, and needs 1243 /// to be run interlaced with other jump threading tasks. 1244 bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) { 1245 // Don't hack volatile and ordered loads. 1246 if (!LoadI->isUnordered()) return false; 1247 1248 // If the load is defined in a block with exactly one predecessor, it can't be 1249 // partially redundant. 1250 BasicBlock *LoadBB = LoadI->getParent(); 1251 if (LoadBB->getSinglePredecessor()) 1252 return false; 1253 1254 // If the load is defined in an EH pad, it can't be partially redundant, 1255 // because the edges between the invoke and the EH pad cannot have other 1256 // instructions between them. 1257 if (LoadBB->isEHPad()) 1258 return false; 1259 1260 Value *LoadedPtr = LoadI->getOperand(0); 1261 1262 // If the loaded operand is defined in the LoadBB and its not a phi, 1263 // it can't be available in predecessors. 
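  // For example, a pointer produced by a GEP in LoadBB cannot be rewritten in
  // terms of the predecessors, whereas a PHI pointer can be phi-translated
  // into each predecessor below.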
1264 if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr)) 1265 return false; 1266 1267 // Scan a few instructions up from the load, to see if it is obviously live at 1268 // the entry to its block. 1269 BasicBlock::iterator BBIt(LoadI); 1270 bool IsLoadCSE; 1271 if (Value *AvailableVal = FindAvailableLoadedValue( 1272 LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) { 1273 // If the value of the load is locally available within the block, just use 1274 // it. This frequently occurs for reg2mem'd allocas. 1275 1276 if (IsLoadCSE) { 1277 LoadInst *NLoadI = cast<LoadInst>(AvailableVal); 1278 combineMetadataForCSE(NLoadI, LoadI, false); 1279 LVI->forgetValue(NLoadI); 1280 }; 1281 1282 // If the returned value is the load itself, replace with poison. This can 1283 // only happen in dead loops. 1284 if (AvailableVal == LoadI) 1285 AvailableVal = PoisonValue::get(LoadI->getType()); 1286 if (AvailableVal->getType() != LoadI->getType()) 1287 AvailableVal = CastInst::CreateBitOrPointerCast( 1288 AvailableVal, LoadI->getType(), "", LoadI); 1289 LoadI->replaceAllUsesWith(AvailableVal); 1290 LoadI->eraseFromParent(); 1291 return true; 1292 } 1293 1294 // Otherwise, if we scanned the whole block and got to the top of the block, 1295 // we know the block is locally transparent to the load. If not, something 1296 // might clobber its value. 1297 if (BBIt != LoadBB->begin()) 1298 return false; 1299 1300 // If all of the loads and stores that feed the value have the same AA tags, 1301 // then we can propagate them onto any newly inserted loads. 1302 AAMDNodes AATags = LoadI->getAAMetadata(); 1303 1304 SmallPtrSet<BasicBlock*, 8> PredsScanned; 1305 1306 using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>; 1307 1308 AvailablePredsTy AvailablePreds; 1309 BasicBlock *OneUnavailablePred = nullptr; 1310 SmallVector<LoadInst*, 8> CSELoads; 1311 1312 // If we got here, the loaded value is transparent through to the start of the 1313 // block. Check to see if it is available in any of the predecessor blocks. 1314 for (BasicBlock *PredBB : predecessors(LoadBB)) { 1315 // If we already scanned this predecessor, skip it. 1316 if (!PredsScanned.insert(PredBB).second) 1317 continue; 1318 1319 BBIt = PredBB->end(); 1320 unsigned NumScanedInst = 0; 1321 Value *PredAvailable = nullptr; 1322 // NOTE: We don't CSE load that is volatile or anything stronger than 1323 // unordered, that should have been checked when we entered the function. 1324 assert(LoadI->isUnordered() && 1325 "Attempting to CSE volatile or atomic loads"); 1326 // If this is a load on a phi pointer, phi-translate it and search 1327 // for available load/store to the pointer in predecessors. 1328 Type *AccessTy = LoadI->getType(); 1329 const auto &DL = LoadI->getModule()->getDataLayout(); 1330 MemoryLocation Loc(LoadedPtr->DoPHITranslation(LoadBB, PredBB), 1331 LocationSize::precise(DL.getTypeStoreSize(AccessTy)), 1332 AATags); 1333 PredAvailable = findAvailablePtrLoadStore(Loc, AccessTy, LoadI->isAtomic(), 1334 PredBB, BBIt, DefMaxInstsToScan, 1335 AA, &IsLoadCSE, &NumScanedInst); 1336 1337 // If PredBB has a single predecessor, continue scanning through the 1338 // single predecessor. 
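    // The scan budget is shared across the whole chain: each hop is given
    // DefMaxInstsToScan minus the number of instructions already examined, so
    // walking a chain of small predecessors stays within roughly the same
    // overall budget as scanning a single large block.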
1339 BasicBlock *SinglePredBB = PredBB; 1340 while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() && 1341 NumScanedInst < DefMaxInstsToScan) { 1342 SinglePredBB = SinglePredBB->getSinglePredecessor(); 1343 if (SinglePredBB) { 1344 BBIt = SinglePredBB->end(); 1345 PredAvailable = findAvailablePtrLoadStore( 1346 Loc, AccessTy, LoadI->isAtomic(), SinglePredBB, BBIt, 1347 (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE, 1348 &NumScanedInst); 1349 } 1350 } 1351 1352 if (!PredAvailable) { 1353 OneUnavailablePred = PredBB; 1354 continue; 1355 } 1356 1357 if (IsLoadCSE) 1358 CSELoads.push_back(cast<LoadInst>(PredAvailable)); 1359 1360 // If so, this load is partially redundant. Remember this info so that we 1361 // can create a PHI node. 1362 AvailablePreds.emplace_back(PredBB, PredAvailable); 1363 } 1364 1365 // If the loaded value isn't available in any predecessor, it isn't partially 1366 // redundant. 1367 if (AvailablePreds.empty()) return false; 1368 1369 // Okay, the loaded value is available in at least one (and maybe all!) 1370 // predecessors. If the value is unavailable in more than one unique 1371 // predecessor, we want to insert a merge block for those common predecessors. 1372 // This ensures that we only have to insert one reload, thus not increasing 1373 // code size. 1374 BasicBlock *UnavailablePred = nullptr; 1375 1376 // If the value is unavailable in one of predecessors, we will end up 1377 // inserting a new instruction into them. It is only valid if all the 1378 // instructions before LoadI are guaranteed to pass execution to its 1379 // successor, or if LoadI is safe to speculate. 1380 // TODO: If this logic becomes more complex, and we will perform PRE insertion 1381 // farther than to a predecessor, we need to reuse the code from GVN's PRE. 1382 // It requires domination tree analysis, so for this simple case it is an 1383 // overkill. 1384 if (PredsScanned.size() != AvailablePreds.size() && 1385 !isSafeToSpeculativelyExecute(LoadI)) 1386 for (auto I = LoadBB->begin(); &*I != LoadI; ++I) 1387 if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) 1388 return false; 1389 1390 // If there is exactly one predecessor where the value is unavailable, the 1391 // already computed 'OneUnavailablePred' block is it. If it ends in an 1392 // unconditional branch, we know that it isn't a critical edge. 1393 if (PredsScanned.size() == AvailablePreds.size()+1 && 1394 OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) { 1395 UnavailablePred = OneUnavailablePred; 1396 } else if (PredsScanned.size() != AvailablePreds.size()) { 1397 // Otherwise, we had multiple unavailable predecessors or we had a critical 1398 // edge from the one. 1399 SmallVector<BasicBlock*, 8> PredsToSplit; 1400 SmallPtrSet<BasicBlock*, 8> AvailablePredSet; 1401 1402 for (const auto &AvailablePred : AvailablePreds) 1403 AvailablePredSet.insert(AvailablePred.first); 1404 1405 // Add all the unavailable predecessors to the PredsToSplit list. 1406 for (BasicBlock *P : predecessors(LoadBB)) { 1407 // If the predecessor is an indirect goto, we can't split the edge. 1408 if (isa<IndirectBrInst>(P->getTerminator())) 1409 return false; 1410 1411 if (!AvailablePredSet.count(P)) 1412 PredsToSplit.push_back(P); 1413 } 1414 1415 // Split them out to their own block. 1416 UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split"); 1417 } 1418 1419 // If the value isn't available in all predecessors, then there will be 1420 // exactly one where it isn't available. 
Insert a load on that edge and add 1421 // it to the AvailablePreds list. 1422 if (UnavailablePred) { 1423 assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 && 1424 "Can't handle critical edge here!"); 1425 LoadInst *NewVal = new LoadInst( 1426 LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred), 1427 LoadI->getName() + ".pr", false, LoadI->getAlign(), 1428 LoadI->getOrdering(), LoadI->getSyncScopeID(), 1429 UnavailablePred->getTerminator()); 1430 NewVal->setDebugLoc(LoadI->getDebugLoc()); 1431 if (AATags) 1432 NewVal->setAAMetadata(AATags); 1433 1434 AvailablePreds.emplace_back(UnavailablePred, NewVal); 1435 } 1436 1437 // Now we know that each predecessor of this block has a value in 1438 // AvailablePreds, sort them for efficient access as we're walking the preds. 1439 array_pod_sort(AvailablePreds.begin(), AvailablePreds.end()); 1440 1441 // Create a PHI node at the start of the block for the PRE'd load value. 1442 pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB); 1443 PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), ""); 1444 PN->insertBefore(LoadBB->begin()); 1445 PN->takeName(LoadI); 1446 PN->setDebugLoc(LoadI->getDebugLoc()); 1447 1448 // Insert new entries into the PHI for each predecessor. A single block may 1449 // have multiple entries here. 1450 for (pred_iterator PI = PB; PI != PE; ++PI) { 1451 BasicBlock *P = *PI; 1452 AvailablePredsTy::iterator I = 1453 llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr)); 1454 1455 assert(I != AvailablePreds.end() && I->first == P && 1456 "Didn't find entry for predecessor!"); 1457 1458 // If we have an available predecessor but it requires casting, insert the 1459 // cast in the predecessor and use the cast. Note that we have to update the 1460 // AvailablePreds vector as we go so that all of the PHI entries for this 1461 // predecessor use the same bitcast. 1462 Value *&PredV = I->second; 1463 if (PredV->getType() != LoadI->getType()) 1464 PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "", 1465 P->getTerminator()); 1466 1467 PN->addIncoming(PredV, I->first); 1468 } 1469 1470 for (LoadInst *PredLoadI : CSELoads) { 1471 combineMetadataForCSE(PredLoadI, LoadI, true); 1472 LVI->forgetValue(PredLoadI); 1473 } 1474 1475 LoadI->replaceAllUsesWith(PN); 1476 LoadI->eraseFromParent(); 1477 1478 return true; 1479 } 1480 1481 /// findMostPopularDest - The specified list contains multiple possible 1482 /// threadable destinations. Pick the one that occurs the most frequently in 1483 /// the list. 1484 static BasicBlock * 1485 findMostPopularDest(BasicBlock *BB, 1486 const SmallVectorImpl<std::pair<BasicBlock *, 1487 BasicBlock *>> &PredToDestList) { 1488 assert(!PredToDestList.empty()); 1489 1490 // Determine popularity. If there are multiple possible destinations, we 1491 // explicitly choose to ignore 'undef' destinations. We prefer to thread 1492 // blocks with known and real destinations to threading undef. We'll handle 1493 // them later if interesting. 1494 MapVector<BasicBlock *, unsigned> DestPopularity; 1495 1496 // Populate DestPopularity with the successors in the order they appear in the 1497 // successor list. This way, we ensure determinism by iterating it in the 1498 // same order in std::max_element below. We map nullptr to 0 so that we can 1499 // return nullptr when PredToDestList contains nullptr only. 
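  // As an illustration: if the threadable predecessors map to destinations
  // {%t, %t, null, %f}, then %t wins with a count of 2; if every entry is
  // null, all counts stay at zero and the nullptr key, inserted first, is
  // what std::max_element returns.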
1500 DestPopularity[nullptr] = 0; 1501 for (auto *SuccBB : successors(BB)) 1502 DestPopularity[SuccBB] = 0; 1503 1504 for (const auto &PredToDest : PredToDestList) 1505 if (PredToDest.second) 1506 DestPopularity[PredToDest.second]++; 1507 1508 // Find the most popular dest. 1509 auto MostPopular = std::max_element( 1510 DestPopularity.begin(), DestPopularity.end(), llvm::less_second()); 1511 1512 // Okay, we have finally picked the most popular destination. 1513 return MostPopular->first; 1514 } 1515 1516 // Try to evaluate the value of V when the control flows from PredPredBB to 1517 // BB->getSinglePredecessor() and then on to BB. 1518 Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB, 1519 BasicBlock *PredPredBB, 1520 Value *V) { 1521 BasicBlock *PredBB = BB->getSinglePredecessor(); 1522 assert(PredBB && "Expected a single predecessor"); 1523 1524 if (Constant *Cst = dyn_cast<Constant>(V)) { 1525 return Cst; 1526 } 1527 1528 // Consult LVI if V is not an instruction in BB or PredBB. 1529 Instruction *I = dyn_cast<Instruction>(V); 1530 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) { 1531 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr); 1532 } 1533 1534 // Look into a PHI argument. 1535 if (PHINode *PHI = dyn_cast<PHINode>(V)) { 1536 if (PHI->getParent() == PredBB) 1537 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB)); 1538 return nullptr; 1539 } 1540 1541 // If we have a CmpInst, try to fold it for each incoming edge into PredBB. 1542 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) { 1543 if (CondCmp->getParent() == BB) { 1544 Constant *Op0 = 1545 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0)); 1546 Constant *Op1 = 1547 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1)); 1548 if (Op0 && Op1) { 1549 return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1); 1550 } 1551 } 1552 return nullptr; 1553 } 1554 1555 return nullptr; 1556 } 1557 1558 bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB, 1559 ConstantPreference Preference, 1560 Instruction *CxtI) { 1561 // If threading this would thread across a loop header, don't even try to 1562 // thread the edge. 1563 if (LoopHeaders.count(BB)) 1564 return false; 1565 1566 PredValueInfoTy PredValues; 1567 if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference, 1568 CxtI)) { 1569 // We don't have known values in predecessors. See if we can thread through 1570 // BB and its sole predecessor. 1571 return maybethreadThroughTwoBasicBlocks(BB, Cond); 1572 } 1573 1574 assert(!PredValues.empty() && 1575 "computeValueKnownInPredecessors returned true with no values"); 1576 1577 LLVM_DEBUG(dbgs() << "IN BB: " << *BB; 1578 for (const auto &PredValue : PredValues) { 1579 dbgs() << " BB '" << BB->getName() 1580 << "': FOUND condition = " << *PredValue.first 1581 << " for pred '" << PredValue.second->getName() << "'.\n"; 1582 }); 1583 1584 // Decide what we want to thread through. Convert our list of known values to 1585 // a list of known destinations for each pred. This also discards duplicate 1586 // predecessors and keeps track of the undefined inputs (which are represented 1587 // as a null dest in the PredToDestList). 
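  // For example (hypothetical values): if BB ends in `br i1 %cond, label %T,
  // label %F` and PredValues = { (true, P1), (false, P2), (undef, P3) }, the
  // loop below records PredToDestList = { (P1, T), (P2, F), (P3, nullptr) }
  // and OnlyDest ends up as the multiple-destination sentinel.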
1588   SmallPtrSet<BasicBlock*, 16> SeenPreds;
1589   SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1590
1591   BasicBlock *OnlyDest = nullptr;
1592   BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1593   Constant *OnlyVal = nullptr;
1594   Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1595
1596   for (const auto &PredValue : PredValues) {
1597     BasicBlock *Pred = PredValue.second;
1598     if (!SeenPreds.insert(Pred).second)
1599       continue; // Duplicate predecessor entry.
1600
1601     Constant *Val = PredValue.first;
1602
1603     BasicBlock *DestBB;
1604     if (isa<UndefValue>(Val))
1605       DestBB = nullptr;
1606     else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1607       assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1608       DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1609     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1610       assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1611       DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1612     } else {
1613       assert(isa<IndirectBrInst>(BB->getTerminator())
1614              && "Unexpected terminator");
1615       assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1616       DestBB = cast<BlockAddress>(Val)->getBasicBlock();
1617     }
1618
1619     // If we have exactly one destination, remember it for efficiency below.
1620     if (PredToDestList.empty()) {
1621       OnlyDest = DestBB;
1622       OnlyVal = Val;
1623     } else {
1624       if (OnlyDest != DestBB)
1625         OnlyDest = MultipleDestSentinel;
1626       // It is possible that we have the same destination but a different
1627       // value, e.g. the default case in a switchinst.
1628       if (Val != OnlyVal)
1629         OnlyVal = MultipleVal;
1630     }
1631
1632     // If the predecessor ends with an indirect goto, we can't change its
1633     // destination.
1634     if (isa<IndirectBrInst>(Pred->getTerminator()))
1635       continue;
1636
1637     PredToDestList.emplace_back(Pred, DestBB);
1638   }
1639
1640   // If all edges were unthreadable, we fail.
1641   if (PredToDestList.empty())
1642     return false;
1643
1644   // If all the predecessors go to a single known successor, we want to fold,
1645   // not thread. By doing so, we do not need to duplicate the current block,
1646   // and we avoid missing opportunities in cases where we don't/can't duplicate.
1647   if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1648     if (BB->hasNPredecessors(PredToDestList.size())) {
1649       bool SeenFirstBranchToOnlyDest = false;
1650       std::vector <DominatorTree::UpdateType> Updates;
1651       Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
1652       for (BasicBlock *SuccBB : successors(BB)) {
1653         if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
1654           SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
1655         } else {
1656           SuccBB->removePredecessor(BB, true); // This is an unreachable successor.
1657           Updates.push_back({DominatorTree::Delete, BB, SuccBB});
1658         }
1659       }
1660
1661       // Finally update the terminator.
1662       Instruction *Term = BB->getTerminator();
1663       BranchInst::Create(OnlyDest, Term);
1664       ++NumFolds;
1665       Term->eraseFromParent();
1666       DTU->applyUpdatesPermissive(Updates);
1667       if (auto *BPI = getBPI())
1668         BPI->eraseBlock(BB);
1669
1670       // If the condition is now dead due to the removal of the old terminator,
1671       // erase it.
1672       if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
1673         if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
1674           CondInst->eraseFromParent();
1675         // We can safely replace *some* uses of the CondInst if it has
1676         // exactly one value as returned by LVI. RAUW is incorrect in the
1677         // presence of guards and assumes that have `Cond` as a use. This
1678         // is because we use the guards/assumes to reason about the `Cond`
1679         // value at the end of the block, but RAUW unconditionally replaces
1680         // all uses, including the guards/assumes themselves and the uses
1681         // before the guard/assume.
1682         else if (OnlyVal && OnlyVal != MultipleVal)
1683           replaceFoldableUses(CondInst, OnlyVal, BB);
1684       }
1685       return true;
1686     }
1687   }
1688
1689   // Determine which is the most common successor. If we have many inputs and
1690   // this block is a switch, we want to start by threading the batch that goes
1691   // to the most popular destination first. If we only know about one
1692   // threadable destination (the common case) we can avoid this.
1693   BasicBlock *MostPopularDest = OnlyDest;
1694
1695   if (MostPopularDest == MultipleDestSentinel) {
1696     // Remove any loop headers from the Dest list; threadEdge conservatively
1697     // won't process them, but we might have other destinations that are
1698     // eligible and that we still want to process.
1699     erase_if(PredToDestList,
1700              [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
1701                return LoopHeaders.contains(PredToDest.second);
1702              });
1703
1704     if (PredToDestList.empty())
1705       return false;
1706
1707     MostPopularDest = findMostPopularDest(BB, PredToDestList);
1708   }
1709
1710   // Now that we know what the most popular destination is, factor all
1711   // predecessors that will jump to it into a single predecessor.
1712   SmallVector<BasicBlock*, 16> PredsToFactor;
1713   for (const auto &PredToDest : PredToDestList)
1714     if (PredToDest.second == MostPopularDest) {
1715       BasicBlock *Pred = PredToDest.first;
1716
1717       // This predecessor may be a switch or something else that has multiple
1718       // edges to the block. Factor each of these edges by listing them
1719       // according to # occurrences in PredsToFactor.
1720       for (BasicBlock *Succ : successors(Pred))
1721         if (Succ == BB)
1722           PredsToFactor.push_back(Pred);
1723     }
1724
1725   // If the threadable edges are branching on an undefined value, we get to pick
1726   // the destination that these predecessors should get to.
1727   if (!MostPopularDest)
1728     MostPopularDest = BB->getTerminator()->
1729                           getSuccessor(getBestDestForJumpOnUndef(BB));
1730
1731   // Ok, try to thread it!
1732   return tryThreadEdge(BB, PredsToFactor, MostPopularDest);
1733 }
1734
1735 /// processBranchOnPHI - We have an otherwise unthreadable conditional branch on
1736 /// a PHI node (or freeze PHI) in the current block. See if there are any
1737 /// simplifications we can do based on inputs to the phi node.
1738 bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) {
1739   BasicBlock *BB = PN->getParent();
1740
1741   // TODO: We could make use of this to do it once for blocks with common PHI
1742   // values.
1743   SmallVector<BasicBlock*, 1> PredBBs;
1744   PredBBs.resize(1);
1745
1746   // If any of the predecessor blocks end in an unconditional branch, we can
1747   // *duplicate* the conditional branch into that block in order to further
1748   // encourage jump threading and to eliminate cases where we have a branch on
1749   // a phi of an icmp (a branch on an icmp is much better).
1750 // This is still beneficial when a frozen phi is used as the branch condition 1751 // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp)) 1752 // to br(icmp(freeze ...)). 1753 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1754 BasicBlock *PredBB = PN->getIncomingBlock(i); 1755 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) 1756 if (PredBr->isUnconditional()) { 1757 PredBBs[0] = PredBB; 1758 // Try to duplicate BB into PredBB. 1759 if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs)) 1760 return true; 1761 } 1762 } 1763 1764 return false; 1765 } 1766 1767 /// processBranchOnXOR - We have an otherwise unthreadable conditional branch on 1768 /// a xor instruction in the current block. See if there are any 1769 /// simplifications we can do based on inputs to the xor. 1770 bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) { 1771 BasicBlock *BB = BO->getParent(); 1772 1773 // If either the LHS or RHS of the xor is a constant, don't do this 1774 // optimization. 1775 if (isa<ConstantInt>(BO->getOperand(0)) || 1776 isa<ConstantInt>(BO->getOperand(1))) 1777 return false; 1778 1779 // If the first instruction in BB isn't a phi, we won't be able to infer 1780 // anything special about any particular predecessor. 1781 if (!isa<PHINode>(BB->front())) 1782 return false; 1783 1784 // If this BB is a landing pad, we won't be able to split the edge into it. 1785 if (BB->isEHPad()) 1786 return false; 1787 1788 // If we have a xor as the branch input to this block, and we know that the 1789 // LHS or RHS of the xor in any predecessor is true/false, then we can clone 1790 // the condition into the predecessor and fix that value to true, saving some 1791 // logical ops on that path and encouraging other paths to simplify. 1792 // 1793 // This copies something like this: 1794 // 1795 // BB: 1796 // %X = phi i1 [1], [%X'] 1797 // %Y = icmp eq i32 %A, %B 1798 // %Z = xor i1 %X, %Y 1799 // br i1 %Z, ... 1800 // 1801 // Into: 1802 // BB': 1803 // %Y = icmp ne i32 %A, %B 1804 // br i1 %Y, ... 1805 1806 PredValueInfoTy XorOpValues; 1807 bool isLHS = true; 1808 if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, 1809 WantInteger, BO)) { 1810 assert(XorOpValues.empty()); 1811 if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, 1812 WantInteger, BO)) 1813 return false; 1814 isLHS = false; 1815 } 1816 1817 assert(!XorOpValues.empty() && 1818 "computeValueKnownInPredecessors returned true with no values"); 1819 1820 // Scan the information to see which is most popular: true or false. The 1821 // predecessors can be of the set true, false, or undef. 1822 unsigned NumTrue = 0, NumFalse = 0; 1823 for (const auto &XorOpValue : XorOpValues) { 1824 if (isa<UndefValue>(XorOpValue.first)) 1825 // Ignore undefs for the count. 1826 continue; 1827 if (cast<ConstantInt>(XorOpValue.first)->isZero()) 1828 ++NumFalse; 1829 else 1830 ++NumTrue; 1831 } 1832 1833 // Determine which value to split on, true, false, or undef if neither. 1834 ConstantInt *SplitVal = nullptr; 1835 if (NumTrue > NumFalse) 1836 SplitVal = ConstantInt::getTrue(BB->getContext()); 1837 else if (NumTrue != 0 || NumFalse != 0) 1838 SplitVal = ConstantInt::getFalse(BB->getContext()); 1839 1840 // Collect all of the blocks that this can be folded into so that we can 1841 // factor this once and clone it once. 
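  // For example (hypothetical counts): if the operand is known true in two
  // predecessors, false in one, and undef in a fourth, then NumTrue > NumFalse,
  // SplitVal is true, and below we collect the two "true" predecessors plus the
  // "undef" one (undef entries fold either way).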
1842 SmallVector<BasicBlock*, 8> BlocksToFoldInto; 1843 for (const auto &XorOpValue : XorOpValues) { 1844 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first)) 1845 continue; 1846 1847 BlocksToFoldInto.push_back(XorOpValue.second); 1848 } 1849 1850 // If we inferred a value for all of the predecessors, then duplication won't 1851 // help us. However, we can just replace the LHS or RHS with the constant. 1852 if (BlocksToFoldInto.size() == 1853 cast<PHINode>(BB->front()).getNumIncomingValues()) { 1854 if (!SplitVal) { 1855 // If all preds provide undef, just nuke the xor, because it is undef too. 1856 BO->replaceAllUsesWith(UndefValue::get(BO->getType())); 1857 BO->eraseFromParent(); 1858 } else if (SplitVal->isZero() && BO != BO->getOperand(isLHS)) { 1859 // If all preds provide 0, replace the xor with the other input. 1860 BO->replaceAllUsesWith(BO->getOperand(isLHS)); 1861 BO->eraseFromParent(); 1862 } else { 1863 // If all preds provide 1, set the computed value to 1. 1864 BO->setOperand(!isLHS, SplitVal); 1865 } 1866 1867 return true; 1868 } 1869 1870 // If any of predecessors end with an indirect goto, we can't change its 1871 // destination. 1872 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) { 1873 return isa<IndirectBrInst>(Pred->getTerminator()); 1874 })) 1875 return false; 1876 1877 // Try to duplicate BB into PredBB. 1878 return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); 1879 } 1880 1881 /// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new 1882 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for 1883 /// NewPred using the entries from OldPred (suitably mapped). 1884 static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, 1885 BasicBlock *OldPred, 1886 BasicBlock *NewPred, 1887 DenseMap<Instruction*, Value*> &ValueMap) { 1888 for (PHINode &PN : PHIBB->phis()) { 1889 // Ok, we have a PHI node. Figure out what the incoming value was for the 1890 // DestBlock. 1891 Value *IV = PN.getIncomingValueForBlock(OldPred); 1892 1893 // Remap the value if necessary. 1894 if (Instruction *Inst = dyn_cast<Instruction>(IV)) { 1895 DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst); 1896 if (I != ValueMap.end()) 1897 IV = I->second; 1898 } 1899 1900 PN.addIncoming(IV, NewPred); 1901 } 1902 } 1903 1904 /// Merge basic block BB into its sole predecessor if possible. 1905 bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) { 1906 BasicBlock *SinglePred = BB->getSinglePredecessor(); 1907 if (!SinglePred) 1908 return false; 1909 1910 const Instruction *TI = SinglePred->getTerminator(); 1911 if (TI->isSpecialTerminator() || TI->getNumSuccessors() != 1 || 1912 SinglePred == BB || hasAddressTakenAndUsed(BB)) 1913 return false; 1914 1915 // If SinglePred was a loop header, BB becomes one. 1916 if (LoopHeaders.erase(SinglePred)) 1917 LoopHeaders.insert(BB); 1918 1919 LVI->eraseBlock(SinglePred); 1920 MergeBasicBlockIntoOnlyPred(BB, DTU.get()); 1921 1922 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by 1923 // BB code within one basic block `BB`), we need to invalidate the LVI 1924 // information associated with BB, because the LVI information need not be 1925 // true for all of BB after the merge. For example, 1926 // Before the merge, LVI info and code is as follows: 1927 // SinglePred: <LVI info1 for %p val> 1928 // %y = use of %p 1929 // call @exit() // need not transfer execution to successor. 
1930   // assume(%p) // from this point on %p is true
1931   // br label %BB
1932   // BB: <LVI info2 for %p val, i.e. %p is true>
1933   // %x = use of %p
1934   // br label exit
1935   //
1936   // Note that this LVI info for blocks BB and SinglePred is correct for %p
1937   // (info2 and info1 respectively). After the merge and the deletion of the
1938   // LVI info1 for SinglePred, we have the following code:
1939   // BB: <LVI info2 for %p val>
1940   // %y = use of %p
1941   // call @exit()
1942   // assume(%p)
1943   // %x = use of %p <-- LVI info2 is correct from here onwards.
1944   // br label exit
1945   // LVI info2 for BB is incorrect at the beginning of BB.
1946
1947   // Invalidate LVI information for BB if the LVI is not provably true for
1948   // all of BB.
1949   if (!isGuaranteedToTransferExecutionToSuccessor(BB))
1950     LVI->eraseBlock(BB);
1951   return true;
1952 }
1953
1954 /// Update the SSA form. NewBB contains instructions that are copied from BB.
1955 /// ValueMapping maps old values in BB to new ones in NewBB.
1956 void JumpThreadingPass::updateSSA(
1957     BasicBlock *BB, BasicBlock *NewBB,
1958     DenseMap<Instruction *, Value *> &ValueMapping) {
1959   // If there were values defined in BB that are used outside the block, then we
1960   // now have to update all uses of the value to use either the original value,
1961   // the cloned value, or some PHI derived value. This can require arbitrary
1962   // PHI insertion, which we are prepared to do; clean these up now.
1963   SSAUpdater SSAUpdate;
1964   SmallVector<Use *, 16> UsesToRename;
1965   SmallVector<DbgValueInst *, 4> DbgValues;
1966
1967   for (Instruction &I : *BB) {
1968     // Scan all uses of this instruction to see if it is used outside of its
1969     // block, and if so, record them in UsesToRename.
1970     for (Use &U : I.uses()) {
1971       Instruction *User = cast<Instruction>(U.getUser());
1972       if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
1973         if (UserPN->getIncomingBlock(U) == BB)
1974           continue;
1975       } else if (User->getParent() == BB)
1976         continue;
1977
1978       UsesToRename.push_back(&U);
1979     }
1980
1981     // Find debug values outside of the block.
1982     findDbgValues(DbgValues, &I);
1983     llvm::erase_if(DbgValues, [&](const DbgValueInst *DbgVal) {
1984       return DbgVal->getParent() == BB;
1985     });
1986
1987     // If there are no uses outside the block, we're done with this instruction.
1988     if (UsesToRename.empty() && DbgValues.empty())
1989       continue;
1990     LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
1991
1992     // We found a use of I outside of BB. Rename all uses of I that are outside
1993     // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
1994     // with the two values we know.
1995     SSAUpdate.Initialize(I.getType(), I.getName());
1996     SSAUpdate.AddAvailableValue(BB, &I);
1997     SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
1998
1999     while (!UsesToRename.empty())
2000       SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
2001     if (!DbgValues.empty()) {
2002       SSAUpdate.UpdateDebugValues(&I, DbgValues);
2003       DbgValues.clear();
2004     }
2005
2006     LLVM_DEBUG(dbgs() << "\n");
2007   }
2008 }
2009
2010 /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone
2011 /// arguments that come from PredBB. Return the map from the variables in the
2012 /// source basic block to the variables in the newly created basic block.
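/// For example (hypothetical IR): cloning
///   %p = phi i32 [ 0, %PredBB ], [ %x, %Other ]
/// into NewBB yields a trivial phi with the single entry [ 0, %PredBB ], and
/// the returned map records the old phi -> new phi pair so that later cloned
/// instructions can have their uses of %p remapped to the copy.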
2013 DenseMap<Instruction *, Value *> 2014 JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI, 2015 BasicBlock::iterator BE, BasicBlock *NewBB, 2016 BasicBlock *PredBB) { 2017 // We are going to have to map operands from the source basic block to the new 2018 // copy of the block 'NewBB'. If there are PHI nodes in the source basic 2019 // block, evaluate them to account for entry from PredBB. 2020 DenseMap<Instruction *, Value *> ValueMapping; 2021 2022 // Retargets llvm.dbg.value to any renamed variables. 2023 auto RetargetDbgValueIfPossible = [&](Instruction *NewInst) -> bool { 2024 auto DbgInstruction = dyn_cast<DbgValueInst>(NewInst); 2025 if (!DbgInstruction) 2026 return false; 2027 2028 SmallSet<std::pair<Value *, Value *>, 16> OperandsToRemap; 2029 for (auto DbgOperand : DbgInstruction->location_ops()) { 2030 auto DbgOperandInstruction = dyn_cast<Instruction>(DbgOperand); 2031 if (!DbgOperandInstruction) 2032 continue; 2033 2034 auto I = ValueMapping.find(DbgOperandInstruction); 2035 if (I != ValueMapping.end()) { 2036 OperandsToRemap.insert( 2037 std::pair<Value *, Value *>(DbgOperand, I->second)); 2038 } 2039 } 2040 2041 for (auto &[OldOp, MappedOp] : OperandsToRemap) 2042 DbgInstruction->replaceVariableLocationOp(OldOp, MappedOp); 2043 return true; 2044 }; 2045 2046 // Clone the phi nodes of the source basic block into NewBB. The resulting 2047 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater 2048 // might need to rewrite the operand of the cloned phi. 2049 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2050 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB); 2051 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB); 2052 ValueMapping[PN] = NewPN; 2053 } 2054 2055 // Clone noalias scope declarations in the threaded block. When threading a 2056 // loop exit, we would otherwise end up with two idential scope declarations 2057 // visible at the same time. 2058 SmallVector<MDNode *> NoAliasScopes; 2059 DenseMap<MDNode *, MDNode *> ClonedScopes; 2060 LLVMContext &Context = PredBB->getContext(); 2061 identifyNoAliasScopesToClone(BI, BE, NoAliasScopes); 2062 cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context); 2063 2064 // Clone the non-phi instructions of the source basic block into NewBB, 2065 // keeping track of the mapping and using it to remap operands in the cloned 2066 // instructions. 2067 for (; BI != BE; ++BI) { 2068 Instruction *New = BI->clone(); 2069 New->setName(BI->getName()); 2070 New->insertInto(NewBB, NewBB->end()); 2071 ValueMapping[&*BI] = New; 2072 adaptNoAliasScopes(New, ClonedScopes, Context); 2073 2074 if (RetargetDbgValueIfPossible(New)) 2075 continue; 2076 2077 // Remap operands to patch up intra-block references. 2078 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2079 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2080 DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst); 2081 if (I != ValueMapping.end()) 2082 New->setOperand(i, I->second); 2083 } 2084 } 2085 2086 return ValueMapping; 2087 } 2088 2089 /// Attempt to thread through two successive basic blocks. 2090 bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB, 2091 Value *Cond) { 2092 // Consider: 2093 // 2094 // PredBB: 2095 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ] 2096 // %tobool = icmp eq i32 %cond, 0 2097 // br i1 %tobool, label %BB, label ... 
2098 // 2099 // BB: 2100 // %cmp = icmp eq i32* %var, null 2101 // br i1 %cmp, label ..., label ... 2102 // 2103 // We don't know the value of %var at BB even if we know which incoming edge 2104 // we take to BB. However, once we duplicate PredBB for each of its incoming 2105 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of 2106 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB. 2107 2108 // Require that BB end with a Branch for simplicity. 2109 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2110 if (!CondBr) 2111 return false; 2112 2113 // BB must have exactly one predecessor. 2114 BasicBlock *PredBB = BB->getSinglePredecessor(); 2115 if (!PredBB) 2116 return false; 2117 2118 // Require that PredBB end with a conditional Branch. If PredBB ends with an 2119 // unconditional branch, we should be merging PredBB and BB instead. For 2120 // simplicity, we don't deal with a switch. 2121 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2122 if (!PredBBBranch || PredBBBranch->isUnconditional()) 2123 return false; 2124 2125 // If PredBB has exactly one incoming edge, we don't gain anything by copying 2126 // PredBB. 2127 if (PredBB->getSinglePredecessor()) 2128 return false; 2129 2130 // Don't thread through PredBB if it contains a successor edge to itself, in 2131 // which case we would infinite loop. Suppose we are threading an edge from 2132 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a 2133 // successor edge to itself. If we allowed jump threading in this case, we 2134 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since 2135 // PredBB.thread has a successor edge to PredBB, we would immediately come up 2136 // with another jump threading opportunity from PredBB.thread through PredBB 2137 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we 2138 // would keep peeling one iteration from PredBB. 2139 if (llvm::is_contained(successors(PredBB), PredBB)) 2140 return false; 2141 2142 // Don't thread across a loop header. 2143 if (LoopHeaders.count(PredBB)) 2144 return false; 2145 2146 // Avoid complication with duplicating EH pads. 2147 if (PredBB->isEHPad()) 2148 return false; 2149 2150 // Find a predecessor that we can thread. For simplicity, we only consider a 2151 // successor edge out of BB to which we thread exactly one incoming edge into 2152 // PredBB. 2153 unsigned ZeroCount = 0; 2154 unsigned OneCount = 0; 2155 BasicBlock *ZeroPred = nullptr; 2156 BasicBlock *OnePred = nullptr; 2157 for (BasicBlock *P : predecessors(PredBB)) { 2158 // If PredPred ends with IndirectBrInst, we can't handle it. 2159 if (isa<IndirectBrInst>(P->getTerminator())) 2160 continue; 2161 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>( 2162 evaluateOnPredecessorEdge(BB, P, Cond))) { 2163 if (CI->isZero()) { 2164 ZeroCount++; 2165 ZeroPred = P; 2166 } else if (CI->isOne()) { 2167 OneCount++; 2168 OnePred = P; 2169 } 2170 } 2171 } 2172 2173 // Disregard complicated cases where we have to thread multiple edges. 2174 BasicBlock *PredPredBB; 2175 if (ZeroCount == 1) { 2176 PredPredBB = ZeroPred; 2177 } else if (OneCount == 1) { 2178 PredPredBB = OnePred; 2179 } else { 2180 return false; 2181 } 2182 2183 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred); 2184 2185 // If threading to the same block as we come from, we would infinite loop. 
2186   if (SuccBB == BB) {
2187     LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
2188                       << "' - would thread to self!\n");
2189     return false;
2190   }
2191
2192   // If threading this would thread across a loop header, don't thread the edge.
2193   // See the comments above findLoopHeaders for justifications and caveats.
2194   if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2195     LLVM_DEBUG({
2196       bool BBIsHeader = LoopHeaders.count(BB);
2197       bool SuccIsHeader = LoopHeaders.count(SuccBB);
2198       dbgs() << "  Not threading across "
2199              << (BBIsHeader ? "loop header BB '" : "block BB '")
2200              << BB->getName() << "' to dest "
2201              << (SuccIsHeader ? "loop header BB '" : "block BB '")
2202              << SuccBB->getName()
2203              << "' - it might create an irreducible loop!\n";
2204     });
2205     return false;
2206   }
2207
2208   // Compute the cost of duplicating BB and PredBB.
2209   unsigned BBCost = getJumpThreadDuplicationCost(
2210       TTI, BB, BB->getTerminator(), BBDupThreshold);
2211   unsigned PredBBCost = getJumpThreadDuplicationCost(
2212       TTI, PredBB, PredBB->getTerminator(), BBDupThreshold);
2213
2214   // Give up if costs are too high. We need to check BBCost and PredBBCost
2215   // individually before checking their sum because getJumpThreadDuplicationCost
2216   // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2217   if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
2218       BBCost + PredBBCost > BBDupThreshold) {
2219     LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
2220                       << "' - Cost is too high: " << PredBBCost
2221                       << " for PredBB, " << BBCost << " for BB\n");
2222     return false;
2223   }
2224
2225   // Now we are ready to duplicate PredBB.
2226   threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB);
2227   return true;
2228 }
2229
2230 void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB,
2231                                                     BasicBlock *PredBB,
2232                                                     BasicBlock *BB,
2233                                                     BasicBlock *SuccBB) {
2234   LLVM_DEBUG(dbgs() << "  Threading through '" << PredBB->getName() << "' and '"
2235                     << BB->getName() << "'\n");
2236
2237   // Build BPI/BFI before any changes are made to IR.
2238   bool HasProfile = doesBlockHaveProfileData(BB);
2239   auto *BFI = getOrCreateBFI(HasProfile);
2240   auto *BPI = getOrCreateBPI(BFI != nullptr);
2241
2242   BranchInst *CondBr = cast<BranchInst>(BB->getTerminator());
2243   BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator());
2244
2245   BasicBlock *NewBB =
2246       BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread",
2247                          PredBB->getParent(), PredBB);
2248   NewBB->moveAfter(PredBB);
2249
2250   // Set the block frequency of NewBB.
2251   if (BFI) {
2252     assert(BPI && "It's expected BPI to exist along with BFI");
2253     auto NewBBFreq = BFI->getBlockFreq(PredPredBB) *
2254                      BPI->getEdgeProbability(PredPredBB, PredBB);
2255     BFI->setBlockFreq(NewBB, NewBBFreq);
2256   }
2257
2258   // We are going to have to map operands from the original PredBB block to the
2259   // new copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate
2260   // them to account for entry from PredPredBB.
2261   DenseMap<Instruction *, Value *> ValueMapping =
2262       cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB);
2263
2264   // Copy the edge probabilities from PredBB to NewBB.
2265   if (BPI)
2266     BPI->copyEdgeProbabilities(PredBB, NewBB);
2267
2268   // Update the terminator of PredPredBB to jump to NewBB instead of PredBB.
2269   // This eliminates predecessors from PredPredBB, which requires us to simplify
2270   // any PHI nodes in PredBB.
2271 Instruction *PredPredTerm = PredPredBB->getTerminator(); 2272 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i) 2273 if (PredPredTerm->getSuccessor(i) == PredBB) { 2274 PredBB->removePredecessor(PredPredBB, true); 2275 PredPredTerm->setSuccessor(i, NewBB); 2276 } 2277 2278 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB, 2279 ValueMapping); 2280 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB, 2281 ValueMapping); 2282 2283 DTU->applyUpdatesPermissive( 2284 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)}, 2285 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)}, 2286 {DominatorTree::Insert, PredPredBB, NewBB}, 2287 {DominatorTree::Delete, PredPredBB, PredBB}}); 2288 2289 updateSSA(PredBB, NewBB, ValueMapping); 2290 2291 // Clean up things like PHI nodes with single operands, dead instructions, 2292 // etc. 2293 SimplifyInstructionsInBlock(NewBB, TLI); 2294 SimplifyInstructionsInBlock(PredBB, TLI); 2295 2296 SmallVector<BasicBlock *, 1> PredsToFactor; 2297 PredsToFactor.push_back(NewBB); 2298 threadEdge(BB, PredsToFactor, SuccBB); 2299 } 2300 2301 /// tryThreadEdge - Thread an edge if it's safe and profitable to do so. 2302 bool JumpThreadingPass::tryThreadEdge( 2303 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs, 2304 BasicBlock *SuccBB) { 2305 // If threading to the same block as we come from, we would infinite loop. 2306 if (SuccBB == BB) { 2307 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() 2308 << "' - would thread to self!\n"); 2309 return false; 2310 } 2311 2312 // If threading this would thread across a loop header, don't thread the edge. 2313 // See the comments above findLoopHeaders for justifications and caveats. 2314 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { 2315 LLVM_DEBUG({ 2316 bool BBIsHeader = LoopHeaders.count(BB); 2317 bool SuccIsHeader = LoopHeaders.count(SuccBB); 2318 dbgs() << " Not threading across " 2319 << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName() 2320 << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '") 2321 << SuccBB->getName() << "' - it might create an irreducible loop!\n"; 2322 }); 2323 return false; 2324 } 2325 2326 unsigned JumpThreadCost = getJumpThreadDuplicationCost( 2327 TTI, BB, BB->getTerminator(), BBDupThreshold); 2328 if (JumpThreadCost > BBDupThreshold) { 2329 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() 2330 << "' - Cost is too high: " << JumpThreadCost << "\n"); 2331 return false; 2332 } 2333 2334 threadEdge(BB, PredBBs, SuccBB); 2335 return true; 2336 } 2337 2338 /// threadEdge - We have decided that it is safe and profitable to factor the 2339 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB 2340 /// across BB. Transform the IR to reflect this change. 2341 void JumpThreadingPass::threadEdge(BasicBlock *BB, 2342 const SmallVectorImpl<BasicBlock *> &PredBBs, 2343 BasicBlock *SuccBB) { 2344 assert(SuccBB != BB && "Don't create an infinite loop"); 2345 2346 assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) && 2347 "Don't thread across loop headers"); 2348 2349 // Build BPI/BFI before any changes are made to IR. 2350 bool HasProfile = doesBlockHaveProfileData(BB); 2351 auto *BFI = getOrCreateBFI(HasProfile); 2352 auto *BPI = getOrCreateBPI(BFI != nullptr); 2353 2354 // And finally, do it! Start by factoring the predecessors if needed. 
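  // For example (hypothetical input): if PredBBs = { P1, P2, P3 }, their three
  // edges into BB are first funneled through one new block (named after BB
  // with the ".thr_comm" suffix), and the single edge from that block is the
  // one that actually gets threaded to SuccBB.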
2355 BasicBlock *PredBB; 2356 if (PredBBs.size() == 1) 2357 PredBB = PredBBs[0]; 2358 else { 2359 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2360 << " common predecessors.\n"); 2361 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2362 } 2363 2364 // And finally, do it! 2365 LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() 2366 << "' to '" << SuccBB->getName() 2367 << ", across block:\n " << *BB << "\n"); 2368 2369 LVI->threadEdge(PredBB, BB, SuccBB); 2370 2371 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), 2372 BB->getName()+".thread", 2373 BB->getParent(), BB); 2374 NewBB->moveAfter(PredBB); 2375 2376 // Set the block frequency of NewBB. 2377 if (BFI) { 2378 assert(BPI && "It's expected BPI to exist along with BFI"); 2379 auto NewBBFreq = 2380 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB); 2381 BFI->setBlockFreq(NewBB, NewBBFreq); 2382 } 2383 2384 // Copy all the instructions from BB to NewBB except the terminator. 2385 DenseMap<Instruction *, Value *> ValueMapping = 2386 cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB); 2387 2388 // We didn't copy the terminator from BB over to NewBB, because there is now 2389 // an unconditional jump to SuccBB. Insert the unconditional jump. 2390 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB); 2391 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); 2392 2393 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the 2394 // PHI nodes for NewBB now. 2395 addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); 2396 2397 // Update the terminator of PredBB to jump to NewBB instead of BB. This 2398 // eliminates predecessors from BB, which requires us to simplify any PHI 2399 // nodes in BB. 2400 Instruction *PredTerm = PredBB->getTerminator(); 2401 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) 2402 if (PredTerm->getSuccessor(i) == BB) { 2403 BB->removePredecessor(PredBB, true); 2404 PredTerm->setSuccessor(i, NewBB); 2405 } 2406 2407 // Enqueue required DT updates. 2408 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB}, 2409 {DominatorTree::Insert, PredBB, NewBB}, 2410 {DominatorTree::Delete, PredBB, BB}}); 2411 2412 updateSSA(BB, NewBB, ValueMapping); 2413 2414 // At this point, the IR is fully up to date and consistent. Do a quick scan 2415 // over the new instructions and zap any that are constants or dead. This 2416 // frequently happens because of phi translation. 2417 SimplifyInstructionsInBlock(NewBB, TLI); 2418 2419 // Update the edge weight from BB to SuccBB, which should be less than before. 2420 updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB, BFI, BPI, HasProfile); 2421 2422 // Threaded an edge! 2423 ++NumThreads; 2424 } 2425 2426 /// Create a new basic block that will be the predecessor of BB and successor of 2427 /// all blocks in Preds. When profile data is available, update the frequency of 2428 /// this new block. 2429 BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB, 2430 ArrayRef<BasicBlock *> Preds, 2431 const char *Suffix) { 2432 SmallVector<BasicBlock *, 2> NewBBs; 2433 2434 // Collect the frequencies of all predecessors of BB, which will be used to 2435 // update the edge weight of the result of splitting predecessors. 
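  // For example (hypothetical numbers): if Freq(Pred) = 100 and
  // P(Pred->BB) = 0.25, the map records 25 for Pred; the entries of all
  // predecessors routed into one new block are later summed to set that
  // block's frequency.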
2436   DenseMap<BasicBlock *, BlockFrequency> FreqMap;
2437   auto *BFI = getBFI();
2438   if (BFI) {
2439     auto *BPI = getOrCreateBPI(true);
2440     for (auto *Pred : Preds)
2441       FreqMap.insert(std::make_pair(
2442           Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));
2443   }
2444
2445   // In the case when BB is a LandingPad block, we create 2 new predecessors
2446   // instead of just one.
2447   if (BB->isLandingPad()) {
2448     std::string NewName = std::string(Suffix) + ".split-lp";
2449     SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs);
2450   } else {
2451     NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix));
2452   }
2453
2454   std::vector<DominatorTree::UpdateType> Updates;
2455   Updates.reserve((2 * Preds.size()) + NewBBs.size());
2456   for (auto *NewBB : NewBBs) {
2457     BlockFrequency NewBBFreq(0);
2458     Updates.push_back({DominatorTree::Insert, NewBB, BB});
2459     for (auto *Pred : predecessors(NewBB)) {
2460       Updates.push_back({DominatorTree::Delete, Pred, BB});
2461       Updates.push_back({DominatorTree::Insert, Pred, NewBB});
2462       if (BFI) // Update frequencies between Pred -> NewBB.
2463         NewBBFreq += FreqMap.lookup(Pred);
2464     }
2465     if (BFI) // Apply the summed frequency to NewBB.
2466       BFI->setBlockFreq(NewBB, NewBBFreq);
2467   }
2468
2469   DTU->applyUpdatesPermissive(Updates);
2470   return NewBBs[0];
2471 }
2472
2473 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
2474   const Instruction *TI = BB->getTerminator();
2475   if (!TI || TI->getNumSuccessors() < 2)
2476     return false;
2477
2478   return hasValidBranchWeightMD(*TI);
2479 }
2480
2481 /// Update the block frequency of BB and the branch weight and metadata on the
2482 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 -
2483 /// Freq(PredBB->BB) / Freq(BB->SuccBB).
2484 void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
2485                                                      BasicBlock *BB,
2486                                                      BasicBlock *NewBB,
2487                                                      BasicBlock *SuccBB,
2488                                                      BlockFrequencyInfo *BFI,
2489                                                      BranchProbabilityInfo *BPI,
2490                                                      bool HasProfile) {
2491   assert(((BFI && BPI) || (!BFI && !BPI)) &&
2492          "Both BFI & BPI should either be set or unset");
2493
2494   if (!BFI) {
2495     assert(!HasProfile &&
2496            "It's expected to have BFI/BPI when profile info exists");
2497     return;
2498   }
2499
2500   // As the edge from PredBB to BB is deleted, we have to update the block
2501   // frequency of BB.
2502   auto BBOrigFreq = BFI->getBlockFreq(BB);
2503   auto NewBBFreq = BFI->getBlockFreq(NewBB);
2504   auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
2505   auto BBNewFreq = BBOrigFreq - NewBBFreq;
2506   BFI->setBlockFreq(BB, BBNewFreq);
2507
2508   // Collect updated outgoing edges' frequencies from BB and use them to update
2509   // edge probabilities.
2510   SmallVector<uint64_t, 4> BBSuccFreq;
2511   for (BasicBlock *Succ : successors(BB)) {
2512     auto SuccFreq = (Succ == SuccBB)
2513                         ? BB2SuccBBFreq - NewBBFreq
2514                         : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
2515     BBSuccFreq.push_back(SuccFreq.getFrequency());
2516   }
2517
2518   uint64_t MaxBBSuccFreq =
2519       *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());
2520
2521   SmallVector<BranchProbability, 4> BBSuccProbs;
2522   if (MaxBBSuccFreq == 0)
2523     BBSuccProbs.assign(BBSuccFreq.size(),
2524                        {1, static_cast<unsigned>(BBSuccFreq.size())});
2525   else {
2526     for (uint64_t Freq : BBSuccFreq)
2527       BBSuccProbs.push_back(
2528           BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
2529     // Normalize edge probabilities so that they sum up to one.
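    // For example (hypothetical numbers): raw successor frequencies { 30, 10 }
    // against the maximum of 30 give probabilities { 30/30, 10/30 }, which the
    // normalization below rescales to { 3/4, 1/4 }.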
2530 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(), 2531 BBSuccProbs.end()); 2532 } 2533 2534 // Update edge probabilities in BPI. 2535 BPI->setEdgeProbability(BB, BBSuccProbs); 2536 2537 // Update the profile metadata as well. 2538 // 2539 // Don't do this if the profile of the transformed blocks was statically 2540 // estimated. (This could occur despite the function having an entry 2541 // frequency in completely cold parts of the CFG.) 2542 // 2543 // In this case we don't want to suggest to subsequent passes that the 2544 // calculated weights are fully consistent. Consider this graph: 2545 // 2546 // check_1 2547 // 50% / | 2548 // eq_1 | 50% 2549 // \ | 2550 // check_2 2551 // 50% / | 2552 // eq_2 | 50% 2553 // \ | 2554 // check_3 2555 // 50% / | 2556 // eq_3 | 50% 2557 // \ | 2558 // 2559 // Assuming the blocks check_* all compare the same value against 1, 2 and 3, 2560 // the overall probabilities are inconsistent; the total probability that the 2561 // value is either 1, 2 or 3 is 150%. 2562 // 2563 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3 2564 // becomes 0%. This is even worse if the edge whose probability becomes 0% is 2565 // the loop exit edge. Then based solely on static estimation we would assume 2566 // the loop was extremely hot. 2567 // 2568 // FIXME this locally as well so that BPI and BFI are consistent as well. We 2569 // shouldn't make edges extremely likely or unlikely based solely on static 2570 // estimation. 2571 if (BBSuccProbs.size() >= 2 && HasProfile) { 2572 SmallVector<uint32_t, 4> Weights; 2573 for (auto Prob : BBSuccProbs) 2574 Weights.push_back(Prob.getNumerator()); 2575 2576 auto TI = BB->getTerminator(); 2577 TI->setMetadata( 2578 LLVMContext::MD_prof, 2579 MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights)); 2580 } 2581 } 2582 2583 /// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch 2584 /// to BB which contains an i1 PHI node and a conditional branch on that PHI. 2585 /// If we can duplicate the contents of BB up into PredBB do so now, this 2586 /// improves the odds that the branch will be on an analyzable instruction like 2587 /// a compare. 2588 bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred( 2589 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { 2590 assert(!PredBBs.empty() && "Can't handle an empty set"); 2591 2592 // If BB is a loop header, then duplicating this block outside the loop would 2593 // cause us to transform this into an irreducible loop, don't do this. 2594 // See the comments above findLoopHeaders for justifications and caveats. 2595 if (LoopHeaders.count(BB)) { 2596 LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() 2597 << "' into predecessor block '" << PredBBs[0]->getName() 2598 << "' - it might create an irreducible loop!\n"); 2599 return false; 2600 } 2601 2602 unsigned DuplicationCost = getJumpThreadDuplicationCost( 2603 TTI, BB, BB->getTerminator(), BBDupThreshold); 2604 if (DuplicationCost > BBDupThreshold) { 2605 LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() 2606 << "' - Cost is too high: " << DuplicationCost << "\n"); 2607 return false; 2608 } 2609 2610 // And finally, do it! Start by factoring the predecessors if needed. 
2611 std::vector<DominatorTree::UpdateType> Updates; 2612 BasicBlock *PredBB; 2613 if (PredBBs.size() == 1) 2614 PredBB = PredBBs[0]; 2615 else { 2616 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2617 << " common predecessors.\n"); 2618 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2619 } 2620 Updates.push_back({DominatorTree::Delete, PredBB, BB}); 2621 2622 // Okay, we decided to do this! Clone all the instructions in BB onto the end 2623 // of PredBB. 2624 LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName() 2625 << "' into end of '" << PredBB->getName() 2626 << "' to eliminate branch on phi. Cost: " 2627 << DuplicationCost << " block is:" << *BB << "\n"); 2628 2629 // Unless PredBB ends with an unconditional branch, split the edge so that we 2630 // can just clone the bits from BB into the end of the new PredBB. 2631 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2632 2633 if (!OldPredBranch || !OldPredBranch->isUnconditional()) { 2634 BasicBlock *OldPredBB = PredBB; 2635 PredBB = SplitEdge(OldPredBB, BB); 2636 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB}); 2637 Updates.push_back({DominatorTree::Insert, PredBB, BB}); 2638 Updates.push_back({DominatorTree::Delete, OldPredBB, BB}); 2639 OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); 2640 } 2641 2642 // We are going to have to map operands from the original BB block into the 2643 // PredBB block. Evaluate PHI nodes in BB. 2644 DenseMap<Instruction*, Value*> ValueMapping; 2645 2646 BasicBlock::iterator BI = BB->begin(); 2647 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 2648 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); 2649 // Clone the non-phi instructions of BB into PredBB, keeping track of the 2650 // mapping and using it to remap operands in the cloned instructions. 2651 for (; BI != BB->end(); ++BI) { 2652 Instruction *New = BI->clone(); 2653 New->insertInto(PredBB, OldPredBranch->getIterator()); 2654 2655 // Remap operands to patch up intra-block references. 2656 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2657 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2658 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); 2659 if (I != ValueMapping.end()) 2660 New->setOperand(i, I->second); 2661 } 2662 2663 // If this instruction can be simplified after the operands are updated, 2664 // just use the simplified value instead. This frequently happens due to 2665 // phi translation. 2666 if (Value *IV = simplifyInstruction( 2667 New, 2668 {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) { 2669 ValueMapping[&*BI] = IV; 2670 if (!New->mayHaveSideEffects()) { 2671 New->eraseFromParent(); 2672 New = nullptr; 2673 } 2674 } else { 2675 ValueMapping[&*BI] = New; 2676 } 2677 if (New) { 2678 // Otherwise, insert the new instruction into the block. 2679 New->setName(BI->getName()); 2680 // Update Dominance from simplified New instruction operands. 2681 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2682 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i))) 2683 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB}); 2684 } 2685 } 2686 2687 // Check to see if the targets of the branch had PHI nodes. If so, we need to 2688 // add entries to the PHI nodes for branch from PredBB now. 
2689 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); 2690 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, 2691 ValueMapping); 2692 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, 2693 ValueMapping); 2694 2695 updateSSA(BB, PredBB, ValueMapping); 2696 2697 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge 2698 // that we nuked. 2699 BB->removePredecessor(PredBB, true); 2700 2701 // Remove the unconditional branch at the end of the PredBB block. 2702 OldPredBranch->eraseFromParent(); 2703 if (auto *BPI = getBPI()) 2704 BPI->copyEdgeProbabilities(BB, PredBB); 2705 DTU->applyUpdatesPermissive(Updates); 2706 2707 ++NumDupes; 2708 return true; 2709 } 2710 2711 // Pred is a predecessor of BB with an unconditional branch to BB. SI is 2712 // a Select instruction in Pred. BB has other predecessors and SI is used in 2713 // a PHI node in BB. SI has no other use. 2714 // A new basic block, NewBB, is created and SI is converted to compare and 2715 // conditional branch. SI is erased from parent. 2716 void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, 2717 SelectInst *SI, PHINode *SIUse, 2718 unsigned Idx) { 2719 // Expand the select. 2720 // 2721 // Pred -- 2722 // | v 2723 // | NewBB 2724 // | | 2725 // |----- 2726 // v 2727 // BB 2728 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator()); 2729 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", 2730 BB->getParent(), BB); 2731 // Move the unconditional branch to NewBB. 2732 PredTerm->removeFromParent(); 2733 PredTerm->insertInto(NewBB, NewBB->end()); 2734 // Create a conditional branch and update PHI nodes. 2735 auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); 2736 BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc()); 2737 BI->copyMetadata(*SI, {LLVMContext::MD_prof}); 2738 SIUse->setIncomingValue(Idx, SI->getFalseValue()); 2739 SIUse->addIncoming(SI->getTrueValue(), NewBB); 2740 2741 uint64_t TrueWeight = 1; 2742 uint64_t FalseWeight = 1; 2743 // Copy probabilities from 'SI' to created conditional branch in 'Pred'. 2744 if (extractBranchWeights(*SI, TrueWeight, FalseWeight) && 2745 (TrueWeight + FalseWeight) != 0) { 2746 SmallVector<BranchProbability, 2> BP; 2747 BP.emplace_back(BranchProbability::getBranchProbability( 2748 TrueWeight, TrueWeight + FalseWeight)); 2749 BP.emplace_back(BranchProbability::getBranchProbability( 2750 FalseWeight, TrueWeight + FalseWeight)); 2751 // Update BPI if exists. 2752 if (auto *BPI = getBPI()) 2753 BPI->setEdgeProbability(Pred, BP); 2754 } 2755 // Set the block frequency of NewBB. 2756 if (auto *BFI = getBFI()) { 2757 if ((TrueWeight + FalseWeight) == 0) { 2758 TrueWeight = 1; 2759 FalseWeight = 1; 2760 } 2761 BranchProbability PredToNewBBProb = BranchProbability::getBranchProbability( 2762 TrueWeight, TrueWeight + FalseWeight); 2763 auto NewBBFreq = BFI->getBlockFreq(Pred) * PredToNewBBProb; 2764 BFI->setBlockFreq(NewBB, NewBBFreq); 2765 } 2766 2767 // The select is now dead. 2768 SI->eraseFromParent(); 2769 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB}, 2770 {DominatorTree::Insert, Pred, NewBB}}); 2771 2772 // Update any other PHI nodes in BB. 
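  // For example (hypothetical IR): a phi like
  //   %q = phi i32 [ %v, %Pred ], [ %w, %Other ]
  // now also sees NewBB as a predecessor, so it receives the extra entry
  // [ %v, %NewBB ], reusing the value it already had for Pred.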
2773 for (BasicBlock::iterator BI = BB->begin(); 2774 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) 2775 if (Phi != SIUse) 2776 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); 2777 } 2778 2779 bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) { 2780 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition()); 2781 2782 if (!CondPHI || CondPHI->getParent() != BB) 2783 return false; 2784 2785 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) { 2786 BasicBlock *Pred = CondPHI->getIncomingBlock(I); 2787 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I)); 2788 2789 // The second and third condition can be potentially relaxed. Currently 2790 // the conditions help to simplify the code and allow us to reuse existing 2791 // code, developed for tryToUnfoldSelect(CmpInst *, BasicBlock *) 2792 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse()) 2793 continue; 2794 2795 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2796 if (!PredTerm || !PredTerm->isUnconditional()) 2797 continue; 2798 2799 unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I); 2800 return true; 2801 } 2802 return false; 2803 } 2804 2805 /// tryToUnfoldSelect - Look for blocks of the form 2806 /// bb1: 2807 /// %a = select 2808 /// br bb2 2809 /// 2810 /// bb2: 2811 /// %p = phi [%a, %bb1] ... 2812 /// %c = icmp %p 2813 /// br i1 %c 2814 /// 2815 /// And expand the select into a branch structure if one of its arms allows %c 2816 /// to be folded. This later enables threading from bb1 over bb2. 2817 bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { 2818 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2819 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); 2820 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); 2821 2822 if (!CondBr || !CondBr->isConditional() || !CondLHS || 2823 CondLHS->getParent() != BB) 2824 return false; 2825 2826 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { 2827 BasicBlock *Pred = CondLHS->getIncomingBlock(I); 2828 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); 2829 2830 // Look if one of the incoming values is a select in the corresponding 2831 // predecessor. 2832 if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) 2833 continue; 2834 2835 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2836 if (!PredTerm || !PredTerm->isUnconditional()) 2837 continue; 2838 2839 // Now check if one of the select values would allow us to constant fold the 2840 // terminator in BB. We don't do the transform if both sides fold, those 2841 // cases will be threaded in any case. 2842 LazyValueInfo::Tristate LHSFolds = 2843 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), 2844 CondRHS, Pred, BB, CondCmp); 2845 LazyValueInfo::Tristate RHSFolds = 2846 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), 2847 CondRHS, Pred, BB, CondCmp); 2848 if ((LHSFolds != LazyValueInfo::Unknown || 2849 RHSFolds != LazyValueInfo::Unknown) && 2850 LHSFolds != RHSFolds) { 2851 unfoldSelectInstr(Pred, BB, SI, CondLHS, I); 2852 return true; 2853 } 2854 } 2855 return false; 2856 } 2857 2858 /// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the 2859 /// same BB in the form 2860 /// bb: 2861 /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ... 
2862 ///   %s = select %p, trueval, falseval
2863 ///
2864 /// or
2865 ///
2866 /// bb:
2867 ///   %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
2868 ///   %c = cmp %p, 0
2869 ///   %s = select %c, trueval, falseval
2870 ///
2871 /// And expand the select into a branch structure. This later enables
2872 /// jump-threading over bb in this pass.
2873 ///
2874 /// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
2875 /// the select if the associated PHI has at least one constant. If the unfolded
2876 /// select is not jump-threaded, it will be folded again in later
2877 /// optimizations.
2878 bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) {
2879   // This transform would reduce the quality of msan diagnostics.
2880   // Disable this transform under MemorySanitizer.
2881   if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
2882     return false;
2883
2884   // If threading this would thread across a loop header, don't thread the edge.
2885   // See the comments above findLoopHeaders for justifications and caveats.
2886   if (LoopHeaders.count(BB))
2887     return false;
2888
2889   for (BasicBlock::iterator BI = BB->begin();
2890        PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2891     // Look for a Phi having at least one constant incoming value.
2892     if (llvm::all_of(PN->incoming_values(),
2893                      [](Value *V) { return !isa<ConstantInt>(V); }))
2894       continue;
2895
2896     auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
2897       using namespace PatternMatch;
2898
2899       // Check that SI is in BB and uses V as its condition.
2900       if (SI->getParent() != BB)
2901         return false;
2902       Value *Cond = SI->getCondition();
2903       bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr()));
2904       return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr;
2905     };
2906
2907     SelectInst *SI = nullptr;
2908     for (Use &U : PN->uses()) {
2909       if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2910         // Look for an ICmp in BB that compares PN with a constant and is the
2911         // condition of a Select.
2912         if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
2913             isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
2914           if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
2915             if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
2916               SI = SelectI;
2917               break;
2918             }
2919       } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
2920         // Look for a Select in BB that uses PN as its condition.
2921         if (isUnfoldCandidate(SelectI, U.get())) {
2922           SI = SelectI;
2923           break;
2924         }
2925       }
2926     }
2927
2928     if (!SI)
2929       continue;
2930     // Expand the select.
2931     Value *Cond = SI->getCondition();
2932     if (!isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI))
2933       Cond = new FreezeInst(Cond, "cond.fr", SI);
2934     MDNode *BranchWeights = getBranchWeightMDNode(*SI);
2935     Instruction *Term =
2936         SplitBlockAndInsertIfThen(Cond, SI, false, BranchWeights);
2937     BasicBlock *SplitBB = SI->getParent();
2938     BasicBlock *NewBB = Term->getParent();
2939     PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
2940     NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
2941     NewPN->addIncoming(SI->getFalseValue(), BB);
2942     SI->replaceAllUsesWith(NewPN);
2943     SI->eraseFromParent();
2944     // NewBB and SplitBB are newly created blocks which require insertion.
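    // The result looks roughly like this (hypothetical labels/names):
    //   bb:                       ; original block, now ends in a branch
    //     %cond.fr = freeze i1 %p ; only if %p may be undef or poison
    //     br i1 %cond.fr, label %NewBB, label %SplitBB
    //   NewBB:
    //     br label %SplitBB
    //   SplitBB:                  ; the block that held the select
    //     %phi = phi [ trueval, %NewBB ], [ falseval, %bb ]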
    std::vector<DominatorTree::UpdateType> Updates;
    Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
    Updates.push_back({DominatorTree::Insert, BB, SplitBB});
    Updates.push_back({DominatorTree::Insert, BB, NewBB});
    Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
    // BB's successors were moved to SplitBB, update DTU accordingly.
    for (auto *Succ : successors(SplitBB)) {
      Updates.push_back({DominatorTree::Delete, BB, Succ});
      Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
    }
    DTU->applyUpdatesPermissive(Updates);
    return true;
  }
  return false;
}

/// Try to propagate a guard from the current BB into one of its predecessors
/// in case another branch of execution implies that the condition of this
/// guard is always true. Currently we only process the simplest case that
/// looks like:
///
/// Start:
///   %cond = ...
///   br i1 %cond, label %T1, label %F1
/// T1:
///   br label %Merge
/// F1:
///   br label %Merge
/// Merge:
///   %condGuard = ...
///   call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
///
/// And cond either implies condGuard or !condGuard. In this case all the
/// instructions before the guard can be duplicated in both branches, and the
/// guard is then threaded to one of them.
bool JumpThreadingPass::processGuards(BasicBlock *BB) {
  using namespace PatternMatch;

  // We only want to deal with two predecessors.
  BasicBlock *Pred1, *Pred2;
  auto PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return false;
  Pred1 = *PI++;
  if (PI == PE)
    return false;
  Pred2 = *PI++;
  if (PI != PE)
    return false;
  if (Pred1 == Pred2)
    return false;

  // Try to thread one of the guards of the block.
  // TODO: Look deeper than the immediate predecessor?
  auto *Parent = Pred1->getSinglePredecessor();
  if (!Parent || Parent != Pred2->getSinglePredecessor())
    return false;

  if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
    for (auto &I : *BB)
      if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI))
        return true;

  return false;
}

/// Try to propagate the guard from BB, which is the lower block of a diamond,
/// to one of its branches, in case the diamond's condition implies the
/// guard's condition.
bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard,
                                    BranchInst *BI) {
  assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
  assert(BI->isConditional() && "Unconditional branch has 2 successors?");
  Value *GuardCond = Guard->getArgOperand(0);
  Value *BranchCond = BI->getCondition();
  BasicBlock *TrueDest = BI->getSuccessor(0);
  BasicBlock *FalseDest = BI->getSuccessor(1);

  auto &DL = BB->getModule()->getDataLayout();
  bool TrueDestIsSafe = false;
  bool FalseDestIsSafe = false;

  // True dest is safe if BranchCond => GuardCond.
  auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
  if (Impl && *Impl)
    TrueDestIsSafe = true;
  else {
    // False dest is safe if !BranchCond => GuardCond.
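    // (For illustration: with BranchCond = (x s< 0) and GuardCond = (x s>= 0),
    // !BranchCond is exactly GuardCond, so the guard is redundant on the false
    // arm and only needs to remain on the true arm.)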
    Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
    if (Impl && *Impl)
      FalseDestIsSafe = true;
  }

  if (!TrueDestIsSafe && !FalseDestIsSafe)
    return false;

  BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
  BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;

  ValueToValueMapTy UnguardedMapping, GuardedMapping;
  Instruction *AfterGuard = Guard->getNextNode();
  unsigned Cost =
      getJumpThreadDuplicationCost(TTI, BB, AfterGuard, BBDupThreshold);
  if (Cost > BBDupThreshold)
    return false;
  // Duplicate all instructions before the guard and the guard itself to the
  // branch where the implication is not proved.
  BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU);
  assert(GuardedBlock && "Could not create the guarded block?");
  // Duplicate all instructions before the guard in the unguarded branch.
  // Since we have successfully duplicated the guarded block and this block
  // has fewer instructions, we expect it to succeed.
  BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU);
  assert(UnguardedBlock && "Could not create the unguarded block?");
  LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
                    << GuardedBlock->getName() << "\n");
  // Some instructions before the guard may still have uses. For them, we need
  // to create Phi nodes merging their copies in both guarded and unguarded
  // branches. Those instructions that have no uses can simply be removed.
  SmallVector<Instruction *, 4> ToRemove;
  for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
    if (!isa<PHINode>(&*BI))
      ToRemove.push_back(&*BI);

  Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
  assert(InsertionPoint && "Empty block?");
  // Substitute with Phis & remove.
  for (auto *Inst : reverse(ToRemove)) {
    if (!Inst->use_empty()) {
      PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
      NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
      NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
      NewPN->insertBefore(InsertionPoint);
      Inst->replaceAllUsesWith(NewPN);
    }
    Inst->eraseFromParent();
  }
  return true;
}

PreservedAnalyses JumpThreadingPass::getPreservedAnalysis() const {
  PreservedAnalyses PA;
  PA.preserve<LazyValueAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();

  // TODO: We would like to preserve BPI/BFI. Enable once all paths update them.
  // TODO: Would be nice to verify BPI/BFI consistency as well.
  return PA;
}

template <typename AnalysisT>
typename AnalysisT::Result *JumpThreadingPass::runExternalAnalysis() {
  assert(FAM && "Can't run external analysis without FunctionAnalysisManager");

  // If there were no changes since the last call to 'runExternalAnalysis' then
  // all analyses are either up to date or explicitly invalidated. Just go
  // ahead and run the "external" analysis.
  if (!ChangedSinceLastAnalysisUpdate) {
    assert(!DTU->hasPendingUpdates() &&
           "Lost update of 'ChangedSinceLastAnalysisUpdate'?");
    // Run the "external" analysis.
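    // getResult returns the cached result when it is still valid and only
    // recomputes the analysis if an earlier invalidation has cleared it.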
    return &FAM->getResult<AnalysisT>(*F);
  }
  ChangedSinceLastAnalysisUpdate = false;

  auto PA = getPreservedAnalysis();
  // TODO: This shouldn't be needed once 'getPreservedAnalysis' reports BPI/BFI
  // as preserved.
  PA.preserve<BranchProbabilityAnalysis>();
  PA.preserve<BlockFrequencyAnalysis>();
  // Report everything except explicitly preserved as invalid.
  FAM->invalidate(*F, PA);
  // Update DT/PDT.
  DTU->flush();
  // Make sure DT/PDT are valid before running "external" analysis.
  assert(DTU->getDomTree().verify(DominatorTree::VerificationLevel::Fast));
  assert((!DTU->hasPostDomTree() ||
          DTU->getPostDomTree().verify(
              PostDominatorTree::VerificationLevel::Fast)));
  // Run the "external" analysis.
  auto *Result = &FAM->getResult<AnalysisT>(*F);
  // Update the analyses that JumpThreading depends on and that are not
  // explicitly preserved.
  TTI = &FAM->getResult<TargetIRAnalysis>(*F);
  TLI = &FAM->getResult<TargetLibraryAnalysis>(*F);
  AA = &FAM->getResult<AAManager>(*F);

  return Result;
}

BranchProbabilityInfo *JumpThreadingPass::getBPI() {
  if (!BPI) {
    assert(FAM && "Can't create BPI without FunctionAnalysisManager");
    BPI = FAM->getCachedResult<BranchProbabilityAnalysis>(*F);
  }
  return *BPI;
}

BlockFrequencyInfo *JumpThreadingPass::getBFI() {
  if (!BFI) {
    assert(FAM && "Can't create BFI without FunctionAnalysisManager");
    BFI = FAM->getCachedResult<BlockFrequencyAnalysis>(*F);
  }
  return *BFI;
}

// Important note on the validity of BPI/BFI. JumpThreading tries to preserve
// BPI/BFI as it goes. Thus, if a cached instance exists, it will be updated.
// Otherwise, a new instance of BPI/BFI is created (up to date by definition).
BranchProbabilityInfo *JumpThreadingPass::getOrCreateBPI(bool Force) {
  auto *Res = getBPI();
  if (Res)
    return Res;

  if (Force)
    BPI = runExternalAnalysis<BranchProbabilityAnalysis>();

  return *BPI;
}

BlockFrequencyInfo *JumpThreadingPass::getOrCreateBFI(bool Force) {
  auto *Res = getBFI();
  if (Res)
    return Res;

  if (Force)
    BFI = runExternalAnalysis<BlockFrequencyAnalysis>();

  return *BFI;
}