1 //===- JumpThreading.cpp - Thread control through conditional blocks ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Jump Threading pass.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Transforms/Scalar/JumpThreading.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/MapVector.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/BlockFrequencyInfo.h"
23 #include "llvm/Analysis/BranchProbabilityInfo.h"
24 #include "llvm/Analysis/CFG.h"
25 #include "llvm/Analysis/ConstantFolding.h"
26 #include "llvm/Analysis/DomTreeUpdater.h"
27 #include "llvm/Analysis/GlobalsModRef.h"
28 #include "llvm/Analysis/GuardUtils.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/LazyValueInfo.h"
31 #include "llvm/Analysis/Loads.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/Analysis/MemoryLocation.h"
34 #include "llvm/Analysis/TargetLibraryInfo.h"
35 #include "llvm/Analysis/TargetTransformInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/ConstantRange.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstrTypes.h"
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/IntrinsicInst.h"
49 #include "llvm/IR/Intrinsics.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/MDBuilder.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Module.h"
54 #include "llvm/IR/PassManager.h"
55 #include "llvm/IR/PatternMatch.h"
56 #include "llvm/IR/ProfDataUtils.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Use.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/InitializePasses.h"
61 #include "llvm/Pass.h"
62 #include "llvm/Support/BlockFrequency.h"
63 #include "llvm/Support/BranchProbability.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Debug.h"
67 #include "llvm/Support/raw_ostream.h"
68 #include "llvm/Transforms/Scalar.h"
69 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
70 #include "llvm/Transforms/Utils/Cloning.h"
71 #include "llvm/Transforms/Utils/Local.h"
72 #include "llvm/Transforms/Utils/SSAUpdater.h"
73 #include "llvm/Transforms/Utils/ValueMapper.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cstdint>
77 #include <iterator>
78 #include <memory>
79 #include <utility>
80
81 using namespace llvm;
82 using namespace jumpthreading;
83
84 #define DEBUG_TYPE "jump-threading"
85
86 STATISTIC(NumThreads, "Number of jumps threaded");
87 STATISTIC(NumFolds, "Number of terminators folded");
88 STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");
89
90 static cl::opt<unsigned>
91 BBDuplicateThreshold("jump-threading-threshold",
92 cl::desc("Max block size to duplicate for jump threading"),
93 cl::init(6), cl::Hidden);
94
95 static cl::opt<unsigned>
96 ImplicationSearchThreshold(
97 "jump-threading-implication-search-threshold",
98 cl::desc("The number of predecessors to search for a stronger "
99 "condition to use to thread over a weaker condition"),
100 cl::init(3), cl::Hidden);
101
102 static cl::opt<unsigned> PhiDuplicateThreshold(
103 "jump-threading-phi-threshold",
104 cl::desc("Max PHIs in BB to duplicate for jump threading"), cl::init(76),
105 cl::Hidden);
106
107 static cl::opt<bool> PrintLVIAfterJumpThreading(
108 "print-lvi-after-jump-threading",
109 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false),
110 cl::Hidden);
111
112 static cl::opt<bool> ThreadAcrossLoopHeaders(
113 "jump-threading-across-loop-headers",
114 cl::desc("Allow JumpThreading to thread across loop headers, for testing"),
115 cl::init(false), cl::Hidden);
116
117
118 namespace {
119
120 /// This pass performs 'jump threading', which looks at blocks that have
121 /// multiple predecessors and multiple successors. If one or more of the
122 /// predecessors of the block can be proven to always jump to one of the
123 /// successors, we forward the edge from the predecessor to the successor by
124 /// duplicating the contents of this block.
125 ///
126 /// An example of when this can occur is code like this:
127 ///
128 /// if () { ...
129 /// X = 4;
130 /// }
131 /// if (X < 3) {
132 ///
133 /// In this case, the unconditional branch at the end of the first if can be
134 /// revectored to the false side of the second if.
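///
/// After threading (an illustrative sketch; the label name below is
/// hypothetical), the first block branches straight past the second test:
///
///     if () { ...
///       X = 4;
///       goto if2_false;   // threaded edge, since X = 4 implies !(X < 3)
///     }
///     if (X < 3) {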
135 class JumpThreading : public FunctionPass {
136 JumpThreadingPass Impl;
137
138 public:
139 static char ID; // Pass identification
140
141 JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) {
142 initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
143 }
144
145 bool runOnFunction(Function &F) override;
146
147 void getAnalysisUsage(AnalysisUsage &AU) const override {
148 AU.addRequired<DominatorTreeWrapperPass>();
149 AU.addPreserved<DominatorTreeWrapperPass>();
150 AU.addRequired<AAResultsWrapperPass>();
151 AU.addRequired<LazyValueInfoWrapperPass>();
152 AU.addPreserved<LazyValueInfoWrapperPass>();
153 AU.addPreserved<GlobalsAAWrapperPass>();
154 AU.addRequired<TargetLibraryInfoWrapperPass>();
155 AU.addRequired<TargetTransformInfoWrapperPass>();
156 }
157
158 void releaseMemory() override { Impl.releaseMemory(); }
159 };
160
161 } // end anonymous namespace
162
163 char JumpThreading::ID = 0;
164
165 INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
166 "Jump Threading", false, false)
167 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
168 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
169 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
170 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
171 INITIALIZE_PASS_END(JumpThreading, "jump-threading",
172 "Jump Threading", false, false)
173
174 // Public interface to the Jump Threading pass
175 FunctionPass *llvm::createJumpThreadingPass(int Threshold) {
176 return new JumpThreading(Threshold);
177 }
178
179 JumpThreadingPass::JumpThreadingPass(int T) {
180 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
181 }
182
183 // Update branch probability information according to conditional
184 // branch probability. This is usually made possible for cloned branches
185 // in inline instances by the context specific profile in the caller.
186 // For instance,
187 //
188 // [Block PredBB]
189 // [Branch PredBr]
190 // if (t) {
191 // Block A;
192 // } else {
193 // Block B;
194 // }
195 //
196 // [Block BB]
197 // cond = PN([true, %A], [..., %B]); // PHI node
198 // [Branch CondBr]
199 // if (cond) {
200 // ... // P(cond == true) = 1%
201 // }
202 //
203 // Here we know that when block A is taken, cond must be true, which means
204 // P(cond == true | A) = 1
205 //
206 // Given that P(cond == true) = P(cond == true | A) * P(A) +
207 // P(cond == true | B) * P(B)
208 // we get:
209 //    P(cond == true) = P(A) + P(cond == true | B) * P(B)
210 //
211 // which gives us:
212 //    P(A) <= P(cond == true), i.e.
213 //    P(t == true) <= P(cond == true)
214 //
215 // In other words, if we know P(cond == true) is unlikely, we know
216 // that P(t == true) is also unlikely.
217 //
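//
// As a concrete illustration of the math above (hypothetical numbers): if
// CondBr carries !prof weights {1, 99}, then P(cond == true) = 1%, so for the
// incoming value 'true' from %A we compute BP = 1/100 < 50%, and PredBr's
// profile metadata is set so that its edge toward A gets roughly 1%
// probability and the other edge roughly 99%.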
218 static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) {
219 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
220 if (!CondBr)
221 return;
222
223 uint64_t TrueWeight, FalseWeight;
224 if (!extractBranchWeights(*CondBr, TrueWeight, FalseWeight))
225 return;
226
227 if (TrueWeight + FalseWeight == 0)
228 // Zero branch_weights do not give a hint for getting branch probabilities.
229 // Technically it would result in division by a zero denominator, which is
230 // TrueWeight + FalseWeight.
231 return;
232
233 // Returns the outgoing edge of the dominating predecessor block
234 // that leads to the PhiNode's incoming block:
235 auto GetPredOutEdge =
236 [](BasicBlock *IncomingBB,
237 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> {
238 auto *PredBB = IncomingBB;
239 auto *SuccBB = PhiBB;
240 SmallPtrSet<BasicBlock *, 16> Visited;
241 while (true) {
242 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
243 if (PredBr && PredBr->isConditional())
244 return {PredBB, SuccBB};
245 Visited.insert(PredBB);
246 auto *SinglePredBB = PredBB->getSinglePredecessor();
247 if (!SinglePredBB)
248 return {nullptr, nullptr};
249
250 // Stop searching when SinglePredBB has been visited; it means we have
251 // found an unreachable loop.
252 if (Visited.count(SinglePredBB))
253 return {nullptr, nullptr};
254
255 SuccBB = PredBB;
256 PredBB = SinglePredBB;
257 }
258 };
259
260 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
261 Value *PhiOpnd = PN->getIncomingValue(i);
262 ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd);
263
264 if (!CI || !CI->getType()->isIntegerTy(1))
265 continue;
266
267 BranchProbability BP =
268 (CI->isOne() ? BranchProbability::getBranchProbability(
269 TrueWeight, TrueWeight + FalseWeight)
270 : BranchProbability::getBranchProbability(
271 FalseWeight, TrueWeight + FalseWeight));
272
273 auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB);
274 if (!PredOutEdge.first)
275 return;
276
277 BasicBlock *PredBB = PredOutEdge.first;
278 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
279 if (!PredBr)
280 return;
281
282 uint64_t PredTrueWeight, PredFalseWeight;
283 // FIXME: We currently only set the profile data when it is missing.
284 // With PGO, this can be used to refine even existing profile data with
285 // context information. This needs to be done after more performance
286 // testing.
287 if (extractBranchWeights(*PredBr, PredTrueWeight, PredFalseWeight))
288 continue;
289
290 // We cannot infer anything useful when BP >= 50%, because BP is the
291 // upper bound probability value.
292 if (BP >= BranchProbability(50, 100))
293 continue;
294
295 SmallVector<uint32_t, 2> Weights;
296 if (PredBr->getSuccessor(0) == PredOutEdge.second) {
297 Weights.push_back(BP.getNumerator());
298 Weights.push_back(BP.getCompl().getNumerator());
299 } else {
300 Weights.push_back(BP.getCompl().getNumerator());
301 Weights.push_back(BP.getNumerator());
302 }
303 PredBr->setMetadata(LLVMContext::MD_prof,
304 MDBuilder(PredBr->getParent()->getContext())
305 .createBranchWeights(Weights));
306 }
307 }
308
309 /// runOnFunction - Toplevel algorithm.
310 bool JumpThreading::runOnFunction(Function &F) {
311 if (skipFunction(F))
312 return false;
313 auto TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
314 // Jump threading makes no sense for targets with divergent control flow.
315 if (TTI->hasBranchDivergence())
316 return false;
317 auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
318 auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
319 auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
320 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
321 DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
322 std::unique_ptr<BlockFrequencyInfo> BFI;
323 std::unique_ptr<BranchProbabilityInfo> BPI;
324 if (F.hasProfileData()) {
325 LoopInfo LI{*DT};
326 BPI.reset(new BranchProbabilityInfo(F, LI, TLI));
327 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
328 }
329
330 bool Changed = Impl.runImpl(F, TLI, TTI, LVI, AA, &DTU, F.hasProfileData(),
331 std::move(BFI), std::move(BPI));
332 if (PrintLVIAfterJumpThreading) {
333 dbgs() << "LVI for function '" << F.getName() << "':\n";
334 LVI->printLVI(F, DTU.getDomTree(), dbgs());
335 }
336 return Changed;
337 }
338
339 PreservedAnalyses JumpThreadingPass::run(Function &F,
340 FunctionAnalysisManager &AM) {
341 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
342 // Jump threading makes no sense for targets with divergent control flow.
343 if (TTI.hasBranchDivergence())
344 return PreservedAnalyses::all();
345 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
346 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
347 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
348 auto &AA = AM.getResult<AAManager>(F);
349 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
350
351 std::unique_ptr<BlockFrequencyInfo> BFI;
352 std::unique_ptr<BranchProbabilityInfo> BPI;
353 if (F.hasProfileData()) {
354 LoopInfo LI{DT};
355 BPI.reset(new BranchProbabilityInfo(F, LI, &TLI));
356 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
357 }
358
359 bool Changed = runImpl(F, &TLI, &TTI, &LVI, &AA, &DTU, F.hasProfileData(),
360 std::move(BFI), std::move(BPI));
361
362 if (PrintLVIAfterJumpThreading) {
363 dbgs() << "LVI for function '" << F.getName() << "':\n";
364 LVI.printLVI(F, DTU.getDomTree(), dbgs());
365 }
366
367 if (!Changed)
368 return PreservedAnalyses::all();
369 PreservedAnalyses PA;
370 PA.preserve<DominatorTreeAnalysis>();
371 PA.preserve<LazyValueAnalysis>();
372 return PA;
373 }
374
375 bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
376 TargetTransformInfo *TTI_, LazyValueInfo *LVI_,
377 AliasAnalysis *AA_, DomTreeUpdater *DTU_,
378 bool HasProfileData_,
379 std::unique_ptr<BlockFrequencyInfo> BFI_,
380 std::unique_ptr<BranchProbabilityInfo> BPI_) {
381 LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
382 TLI = TLI_;
383 TTI = TTI_;
384 LVI = LVI_;
385 AA = AA_;
386 DTU = DTU_;
387 BFI.reset();
388 BPI.reset();
389 // When profile data is available, we need to update edge weights after
390 // successful jump threading, which requires both BPI and BFI to be available.
391 HasProfileData = HasProfileData_;
392 auto *GuardDecl = F.getParent()->getFunction(
393 Intrinsic::getName(Intrinsic::experimental_guard));
394 HasGuards = GuardDecl && !GuardDecl->use_empty();
395 if (HasProfileData) {
396 BPI = std::move(BPI_);
397 BFI = std::move(BFI_);
398 }
399
400 // Reduce the number of instructions duplicated when optimizing strictly for
401 // size.
402 if (BBDuplicateThreshold.getNumOccurrences())
403 BBDupThreshold = BBDuplicateThreshold;
404 else if (F.hasFnAttribute(Attribute::MinSize))
405 BBDupThreshold = 3;
406 else
407 BBDupThreshold = DefaultBBDupThreshold;
408
409 // JumpThreading must not process blocks unreachable from entry. It's a
410 // waste of compute time and can potentially lead to hangs.
411 SmallPtrSet<BasicBlock *, 16> Unreachable;
412 assert(DTU && "DTU isn't passed into JumpThreading before using it.");
413 assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
414 DominatorTree &DT = DTU->getDomTree();
415 for (auto &BB : F)
416 if (!DT.isReachableFromEntry(&BB))
417 Unreachable.insert(&BB);
418
419 if (!ThreadAcrossLoopHeaders)
420 findLoopHeaders(F);
421
422 bool EverChanged = false;
423 bool Changed;
424 do {
425 Changed = false;
426 for (auto &BB : F) {
427 if (Unreachable.count(&BB))
428 continue;
429 while (processBlock(&BB)) // Thread all of the branches we can over BB.
430 Changed = true;
431
432 // Jump threading may have introduced redundant debug values into BB
433 // which should be removed.
434 if (Changed)
435 RemoveRedundantDbgInstrs(&BB);
436
437 // Stop processing BB if it's the entry or is now deleted. The following
438 // routines attempt to eliminate BB, and locating a suitable replacement
439 // for the entry is non-trivial.
440 if (&BB == &F.getEntryBlock() || DTU->isBBPendingDeletion(&BB))
441 continue;
442
443 if (pred_empty(&BB)) {
444 // When processBlock makes BB unreachable it doesn't bother to fix up
445 // the instructions in it. We must remove BB to prevent invalid IR.
446 LLVM_DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName()
447 << "' with terminator: " << *BB.getTerminator()
448 << '\n');
449 LoopHeaders.erase(&BB);
450 LVI->eraseBlock(&BB);
451 DeleteDeadBlock(&BB, DTU);
452 Changed = true;
453 continue;
454 }
455
456 // processBlock doesn't thread BBs with unconditional TIs. However, if BB
457 // is "almost empty", we attempt to merge BB with its sole successor.
458 auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
459 if (BI && BI->isUnconditional()) {
460 BasicBlock *Succ = BI->getSuccessor(0);
461 if (
462 // The terminator must be the only non-phi instruction in BB.
463 BB.getFirstNonPHIOrDbg(true)->isTerminator() &&
464 // Don't alter Loop headers and latches to ensure another pass can
465 // detect and transform nested loops later.
466 !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) &&
467 TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU)) {
468 RemoveRedundantDbgInstrs(Succ);
469 // BB is valid for cleanup here because we passed in DTU. F remains
470 // BB's parent until a DTU->getDomTree() event.
471 LVI->eraseBlock(&BB);
472 Changed = true;
473 }
474 }
475 }
476 EverChanged |= Changed;
477 } while (Changed);
478
479 LoopHeaders.clear();
480 return EverChanged;
481 }
482
483 // Replace uses of Cond with ToVal when safe to do so. If all uses are
484 // replaced, we can remove Cond. We cannot blindly replace all uses of Cond
485 // because we may incorrectly replace uses when guards/assumes are uses of
486 // `Cond` and we used the guards/assumes to reason about the `Cond` value
487 // at the end of block. RAUW unconditionally replaces all uses
488 // including the guards/assumes themselves and the uses before the
489 // guard/assume.
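// For illustration (a hypothetical snippet): if BB contains
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
// then %cond is only known to be true *after* the guard executes; rewriting
// the guard's own operand (or uses before it) to 'true' would erase the very
// check that justified the fact.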
490 static bool replaceFoldableUses(Instruction *Cond, Value *ToVal,
491 BasicBlock *KnownAtEndOfBB) {
492 bool Changed = false;
493 assert(Cond->getType() == ToVal->getType());
494 // We can unconditionally replace all uses in non-local blocks (i.e. uses
495 // strictly dominated by BB), since LVI information is true from the
496 // terminator of BB.
497 if (Cond->getParent() == KnownAtEndOfBB)
498 Changed |= replaceNonLocalUsesWith(Cond, ToVal);
499 for (Instruction &I : reverse(*KnownAtEndOfBB)) {
500 // Reached the Cond whose uses we are trying to replace, so there are no
501 // more uses.
502 if (&I == Cond)
503 break;
504 // We only replace uses in instructions that are guaranteed to reach the end
505 // of BB, where we know Cond is ToVal.
506 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
507 break;
508 Changed |= I.replaceUsesOfWith(Cond, ToVal);
509 }
510 if (Cond->use_empty() && !Cond->mayHaveSideEffects()) {
511 Cond->eraseFromParent();
512 Changed = true;
513 }
514 return Changed;
515 }
516
517 /// Return the cost of duplicating a piece of this block, from the first
518 /// non-phi up to (but not including) StopAt, in order to thread across it.
519 /// Stop scanning when exceeding the threshold. If duplication is impossible, returns ~0U.
520 static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI,
521 BasicBlock *BB,
522 Instruction *StopAt,
523 unsigned Threshold) {
524 assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
525
526 // Do not duplicate the BB if it has a lot of PHI nodes.
527 // If a threadable chain is too long then the number of PHI nodes can add up,
528 // leading to a substantial increase in compile time when rewriting the SSA.
529 unsigned PhiCount = 0;
530 Instruction *FirstNonPHI = nullptr;
531 for (Instruction &I : *BB) {
532 if (!isa<PHINode>(&I)) {
533 FirstNonPHI = &I;
534 break;
535 }
536 if (++PhiCount > PhiDuplicateThreshold)
537 return ~0U;
538 }
539
540 // Ignore PHI nodes; these will be flattened when duplication happens.
541 BasicBlock::const_iterator I(FirstNonPHI);
542
543 // FIXME: THREADING will delete values that are just used to compute the
544 // branch, so they shouldn't count against the duplication cost.
545
546 unsigned Bonus = 0;
547 if (BB->getTerminator() == StopAt) {
548 // Threading through a switch statement is particularly profitable. If this
549 // block ends in a switch, decrease its cost to make it more likely to
550 // happen.
551 if (isa<SwitchInst>(StopAt))
552 Bonus = 6;
553
554 // The same holds for indirect branches, but slightly more so.
555 if (isa<IndirectBrInst>(StopAt))
556 Bonus = 8;
557 }
558
559 // Bump the threshold up so the early exit from the loop doesn't skip the
560 // terminator-based Size adjustment at the end.
561 Threshold += Bonus;
562
563 // Sum up the cost of each instruction until we get to the terminator. Don't
564 // include the terminator because the copy won't include it.
565 unsigned Size = 0;
566 for (; &*I != StopAt; ++I) {
567
568 // Stop scanning the block if we've reached the threshold.
569 if (Size > Threshold)
570 return Size;
571
572 // Bail out if this instruction gives back a token type; it is not possible
573 // to duplicate it if it is used outside this BB.
574 if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
575 return ~0U;
576
577 // Blocks with NoDuplicate are modelled as having infinite cost, so they
578 // are never duplicated.
579 if (const CallInst *CI = dyn_cast<CallInst>(I))
580 if (CI->cannotDuplicate() || CI->isConvergent())
581 return ~0U;
582
583 if (TTI->getInstructionCost(&*I, TargetTransformInfo::TCK_SizeAndLatency) ==
584 TargetTransformInfo::TCC_Free)
585 continue;
586
587 // All other instructions count for at least one unit.
588 ++Size;
589
590 // Calls are more expensive. If they are non-intrinsic calls, we model them
591 // as having cost of 4. If they are a non-vector intrinsic, we model them
592 // as having cost of 2 total, and if they are a vector intrinsic, we model
593 // them as having cost 1.
594 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
595 if (!isa<IntrinsicInst>(CI))
596 Size += 3;
597 else if (!CI->getType()->isVectorTy())
598 Size += 1;
599 }
600 }
601
602 return Size > Bonus ? Size - Bonus : 0;
603 }
604
605 /// findLoopHeaders - We do not want jump threading to turn proper loop
606 /// structures into irreducible loops. Doing this breaks up the loop nesting
607 /// hierarchy and pessimizes later transformations. To prevent this from
608 /// happening, we first have to find the loop headers. Here we approximate this
609 /// by finding targets of backedges in the CFG.
610 ///
611 /// Note that there definitely are cases when we want to allow threading of
612 /// edges across a loop header. For example, threading a jump from outside the
613 /// loop (the preheader) to an exit block of the loop is definitely profitable.
614 /// It is also almost always profitable to thread backedges from within the loop
615 /// to exit blocks, and is often profitable to thread backedges to other blocks
616 /// within the loop (forming a nested loop). This simple analysis is not rich
617 /// enough to track all of these properties and keep it up-to-date as the CFG
618 /// mutates, so we don't allow any of these transformations.
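///
/// For example, threading an entering edge around the header and directly
/// into the loop body would give the loop a second entry point, making it
/// irreducible.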
619 void JumpThreadingPass::findLoopHeaders(Function &F) {
620 SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
621 FindFunctionBackedges(F, Edges);
622
623 for (const auto &Edge : Edges)
624 LoopHeaders.insert(Edge.second);
625 }
626
627 /// getKnownConstant - Helper method to determine if we can thread over a
628 /// terminator with the given value as its condition, and if so what value to
629 /// use for that. What kind of value this is depends on whether we want an
630 /// integer or a block address, but an undef is always accepted.
631 /// Returns null if Val is null or not an appropriate constant.
632 static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
633 if (!Val)
634 return nullptr;
635
636 // Undef is "known" enough.
637 if (UndefValue *U = dyn_cast<UndefValue>(Val))
638 return U;
639
640 if (Preference == WantBlockAddress)
641 return dyn_cast<BlockAddress>(Val->stripPointerCasts());
642
643 return dyn_cast<ConstantInt>(Val);
644 }
645
646 /// computeValueKnownInPredecessors - Given a basic block BB and a value V, see
647 /// if we can infer that the value is a known ConstantInt/BlockAddress or undef
648 /// in any of our predecessors. If so, return the list of known (value,
649 /// predecessor BB) pairs in the result vector.
650 ///
651 /// This returns true if there were any known values.
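///
/// For illustration (hypothetical names): given
///   %p = phi i1 [ true, %pred1 ], [ %x, %pred2 ]
/// a query for %p yields (true, %pred1), plus a (constant, %pred2) entry if
/// LVI can prove %x is constant on the edge from %pred2.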
652 bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
653 Value *V, BasicBlock *BB, PredValueInfo &Result,
654 ConstantPreference Preference, DenseSet<Value *> &RecursionSet,
655 Instruction *CxtI) {
656 // This method walks up use-def chains recursively. Because of this, we could
657 // get into an infinite loop going around loops in the use-def chain. To
658 // prevent this, keep track of what values we've already visited and
659 // terminate the search if we loop back to them.
660 if (!RecursionSet.insert(V).second)
661 return false;
662
663 // If V is a constant, then it is known in all predecessors.
664 if (Constant *KC = getKnownConstant(V, Preference)) {
665 for (BasicBlock *Pred : predecessors(BB))
666 Result.emplace_back(KC, Pred);
667
668 return !Result.empty();
669 }
670
671 // If V is a non-instruction value, or an instruction in a different block,
672 // then it can't be derived from a PHI.
673 Instruction *I = dyn_cast<Instruction>(V);
674 if (!I || I->getParent() != BB) {
675
676 // Okay, if this is a live-in value, see if it has a known value on any
677 // edge from our predecessors.
678 for (BasicBlock *P : predecessors(BB)) {
679 using namespace PatternMatch;
680 // If the value is known by LazyValueInfo to be a constant in a
681 // predecessor, use that information to try to thread this block.
682 Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
683 // If the value is a non-local compare-with-a-constant, use the richer
684 // 'getPredicateOnEdge' method. It can handle value inequalities better:
685 // for example, if the compare is "X < 4" and "X < 3" is known true on the
686 // edge, the result follows even though the value of X itself is not available.
687 CmpInst::Predicate Pred;
688 Value *Val;
689 Constant *Cst;
690 if (!PredCst && match(V, m_Cmp(Pred, m_Value(Val), m_Constant(Cst)))) {
691 auto Res = LVI->getPredicateOnEdge(Pred, Val, Cst, P, BB, CxtI);
692 if (Res != LazyValueInfo::Unknown)
693 PredCst = ConstantInt::getBool(V->getContext(), Res);
694 }
695 if (Constant *KC = getKnownConstant(PredCst, Preference))
696 Result.emplace_back(KC, P);
697 }
698
699 return !Result.empty();
700 }
701
702 /// If I is a PHI node, then we know the incoming values for any constants.
703 if (PHINode *PN = dyn_cast<PHINode>(I)) {
704 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
705 Value *InVal = PN->getIncomingValue(i);
706 if (Constant *KC = getKnownConstant(InVal, Preference)) {
707 Result.emplace_back(KC, PN->getIncomingBlock(i));
708 } else {
709 Constant *CI = LVI->getConstantOnEdge(InVal,
710 PN->getIncomingBlock(i),
711 BB, CxtI);
712 if (Constant *KC = getKnownConstant(CI, Preference))
713 Result.emplace_back(KC, PN->getIncomingBlock(i));
714 }
715 }
716
717 return !Result.empty();
718 }
719
720 // Handle Cast instructions.
721 if (CastInst *CI = dyn_cast<CastInst>(I)) {
722 Value *Source = CI->getOperand(0);
723 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
724 RecursionSet, CxtI);
725 if (Result.empty())
726 return false;
727
728 // Convert the known values.
729 for (auto &R : Result)
730 R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());
731
732 return true;
733 }
734
735 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
736 Value *Source = FI->getOperand(0);
737 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
738 RecursionSet, CxtI);
739
740 erase_if(Result, [](auto &Pair) {
741 return !isGuaranteedNotToBeUndefOrPoison(Pair.first);
742 });
743
744 return !Result.empty();
745 }
746
747 // Handle some boolean conditions.
748 if (I->getType()->getPrimitiveSizeInBits() == 1) {
749 using namespace PatternMatch;
750 if (Preference != WantInteger)
751 return false;
752 // X | true -> true
753 // X & false -> false
754 Value *Op0, *Op1;
755 if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) ||
756 match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
757 PredValueInfoTy LHSVals, RHSVals;
758
759 computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger,
760 RecursionSet, CxtI);
761 computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger,
762 RecursionSet, CxtI);
763
764 if (LHSVals.empty() && RHSVals.empty())
765 return false;
766
767 ConstantInt *InterestingVal;
768 if (match(I, m_LogicalOr()))
769 InterestingVal = ConstantInt::getTrue(I->getContext());
770 else
771 InterestingVal = ConstantInt::getFalse(I->getContext());
772
773 SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;
774
775 // Scan for the sentinel. If we find an undef, force it to the
776 // interesting value: x|undef -> true and x&undef -> false.
777 for (const auto &LHSVal : LHSVals)
778 if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
779 Result.emplace_back(InterestingVal, LHSVal.second);
780 LHSKnownBBs.insert(LHSVal.second);
781 }
782 for (const auto &RHSVal : RHSVals)
783 if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
784 // If we already inferred a value for this block on the LHS, don't
785 // re-add it.
786 if (!LHSKnownBBs.count(RHSVal.second))
787 Result.emplace_back(InterestingVal, RHSVal.second);
788 }
789
790 return !Result.empty();
791 }
792
793 // Handle the NOT form of XOR.
794 if (I->getOpcode() == Instruction::Xor &&
795 isa<ConstantInt>(I->getOperand(1)) &&
796 cast<ConstantInt>(I->getOperand(1))->isOne()) {
797 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result,
798 WantInteger, RecursionSet, CxtI);
799 if (Result.empty())
800 return false;
801
802 // Invert the known values.
803 for (auto &R : Result)
804 R.first = ConstantExpr::getNot(R.first);
805
806 return true;
807 }
808
809 // Try to simplify some other binary operator values.
810 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
811 if (Preference != WantInteger)
812 return false;
813 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
814 const DataLayout &DL = BO->getModule()->getDataLayout();
815 PredValueInfoTy LHSVals;
816 computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals,
817 WantInteger, RecursionSet, CxtI);
818
819 // Try to use constant folding to simplify the binary operator.
820 for (const auto &LHSVal : LHSVals) {
821 Constant *V = LHSVal.first;
822 Constant *Folded =
823 ConstantFoldBinaryOpOperands(BO->getOpcode(), V, CI, DL);
824
825 if (Constant *KC = getKnownConstant(Folded, WantInteger))
826 Result.emplace_back(KC, LHSVal.second);
827 }
828 }
829
830 return !Result.empty();
831 }
832
833 // Handle compare with phi operand, where the PHI is defined in this block.
834 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
835 if (Preference != WantInteger)
836 return false;
837 Type *CmpType = Cmp->getType();
838 Value *CmpLHS = Cmp->getOperand(0);
839 Value *CmpRHS = Cmp->getOperand(1);
840 CmpInst::Predicate Pred = Cmp->getPredicate();
841
842 PHINode *PN = dyn_cast<PHINode>(CmpLHS);
843 if (!PN)
844 PN = dyn_cast<PHINode>(CmpRHS);
845 if (PN && PN->getParent() == BB) {
846 const DataLayout &DL = PN->getModule()->getDataLayout();
847 // We can do this simplification if any comparisons fold to true or false.
848 // See if any do.
849 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
850 BasicBlock *PredBB = PN->getIncomingBlock(i);
851 Value *LHS, *RHS;
852 if (PN == CmpLHS) {
853 LHS = PN->getIncomingValue(i);
854 RHS = CmpRHS->DoPHITranslation(BB, PredBB);
855 } else {
856 LHS = CmpLHS->DoPHITranslation(BB, PredBB);
857 RHS = PN->getIncomingValue(i);
858 }
859 Value *Res = simplifyCmpInst(Pred, LHS, RHS, {DL});
860 if (!Res) {
861 if (!isa<Constant>(RHS))
862 continue;
863
864 // A getPredicateOnEdge call makes no sense if LHS is defined in BB.
865 auto LHSInst = dyn_cast<Instruction>(LHS);
866 if (LHSInst && LHSInst->getParent() == BB)
867 continue;
868
869 LazyValueInfo::Tristate
870 ResT = LVI->getPredicateOnEdge(Pred, LHS,
871 cast<Constant>(RHS), PredBB, BB,
872 CxtI ? CxtI : Cmp);
873 if (ResT == LazyValueInfo::Unknown)
874 continue;
875 Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
876 }
877
878 if (Constant *KC = getKnownConstant(Res, WantInteger))
879 Result.emplace_back(KC, PredBB);
880 }
881
882 return !Result.empty();
883 }
884
885 // If comparing a live-in value against a constant, see if we know the
886 // live-in value on any predecessors.
887 if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
888 Constant *CmpConst = cast<Constant>(CmpRHS);
889
890 if (!isa<Instruction>(CmpLHS) ||
891 cast<Instruction>(CmpLHS)->getParent() != BB) {
892 for (BasicBlock *P : predecessors(BB)) {
893 // If the value is known by LazyValueInfo to be a constant in a
894 // predecessor, use that information to try to thread this block.
895 LazyValueInfo::Tristate Res =
896 LVI->getPredicateOnEdge(Pred, CmpLHS,
897 CmpConst, P, BB, CxtI ? CxtI : Cmp);
898 if (Res == LazyValueInfo::Unknown)
899 continue;
900
901 Constant *ResC = ConstantInt::get(CmpType, Res);
902 Result.emplace_back(ResC, P);
903 }
904
905 return !Result.empty();
906 }
907
908 // InstCombine can fold some forms of constant range checks into
909 // (icmp (add (x, C1)), C2). See if we have such a thing with
910 // x as a live-in.
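// For instance (illustrative), the source-level check 5 <= x && x < 10 is
// typically folded to (icmp ult (add x, -5), 5); if LVI knows x's range on
// an incoming edge, that icmp can be evaluated to a constant there.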
911 {
912 using namespace PatternMatch;
913
914 Value *AddLHS;
915 ConstantInt *AddConst;
916 if (isa<ConstantInt>(CmpConst) &&
917 match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
918 if (!isa<Instruction>(AddLHS) ||
919 cast<Instruction>(AddLHS)->getParent() != BB) {
920 for (BasicBlock *P : predecessors(BB)) {
921 // If the value is known by LazyValueInfo to be a ConstantRange in
922 // a predecessor, use that information to try to thread this
923 // block.
924 ConstantRange CR = LVI->getConstantRangeOnEdge(
925 AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
926 // Propagate the range through the addition.
927 CR = CR.add(AddConst->getValue());
928
929 // Get the range where the compare returns true.
930 ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
931 Pred, cast<ConstantInt>(CmpConst)->getValue());
932
933 Constant *ResC;
934 if (CmpRange.contains(CR))
935 ResC = ConstantInt::getTrue(CmpType);
936 else if (CmpRange.inverse().contains(CR))
937 ResC = ConstantInt::getFalse(CmpType);
938 else
939 continue;
940
941 Result.emplace_back(ResC, P);
942 }
943
944 return !Result.empty();
945 }
946 }
947 }
948
949 // Try to find a constant value for the LHS of a comparison,
950 // and evaluate it statically if we can.
951 PredValueInfoTy LHSVals;
952 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals,
953 WantInteger, RecursionSet, CxtI);
954
955 for (const auto &LHSVal : LHSVals) {
956 Constant *V = LHSVal.first;
957 Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
958 if (Constant *KC = getKnownConstant(Folded, WantInteger))
959 Result.emplace_back(KC, LHSVal.second);
960 }
961
962 return !Result.empty();
963 }
964 }
965
966 if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
967 // Handle select instructions where at least one operand is a known constant
968 // and we can figure out the condition value for any predecessor block.
969 Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
970 Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
971 PredValueInfoTy Conds;
972 if ((TrueVal || FalseVal) &&
973 computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds,
974 WantInteger, RecursionSet, CxtI)) {
975 for (auto &C : Conds) {
976 Constant *Cond = C.first;
977
978 // Figure out what value to use for the condition.
979 bool KnownCond;
980 if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
981 // A known boolean.
982 KnownCond = CI->isOne();
983 } else {
984 assert(isa<UndefValue>(Cond) && "Unexpected condition value");
985 // Either operand will do, so be sure to pick the one that's a known
986 // constant.
987 // FIXME: Do this more cleverly if both values are known constants?
988 KnownCond = (TrueVal != nullptr);
989 }
990
991 // See if the select has a known constant value for this predecessor.
992 if (Constant *Val = KnownCond ? TrueVal : FalseVal)
993 Result.emplace_back(Val, C.second);
994 }
995
996 return !Result.empty();
997 }
998 }
999
1000 // If all else fails, see if LVI can figure out a constant value for us.
1001 assert(CxtI->getParent() == BB && "CxtI should be in BB");
1002 Constant *CI = LVI->getConstant(V, CxtI);
1003 if (Constant *KC = getKnownConstant(CI, Preference)) {
1004 for (BasicBlock *Pred : predecessors(BB))
1005 Result.emplace_back(KC, Pred);
1006 }
1007
1008 return !Result.empty();
1009 }
1010
1011 /// getBestDestForJumpOnUndef - If we determine that the specified block ends
1012 /// in an undefined jump, decide which block is best to revector to.
1013 ///
1014 /// Since we can pick an arbitrary destination, we pick the successor with the
1015 /// fewest predecessors. This should reduce the in-degree of the others.
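///
/// For example, if a terminator's three successors currently have 3, 1 and 2
/// predecessors respectively, successor index 1 is returned.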
1016 static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) {
1017 Instruction *BBTerm = BB->getTerminator();
1018 unsigned MinSucc = 0;
1019 BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
1020 // Compute the successor with the minimum number of predecessors.
1021 unsigned MinNumPreds = pred_size(TestBB);
1022 for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
1023 TestBB = BBTerm->getSuccessor(i);
1024 unsigned NumPreds = pred_size(TestBB);
1025 if (NumPreds < MinNumPreds) {
1026 MinSucc = i;
1027 MinNumPreds = NumPreds;
1028 }
1029 }
1030
1031 return MinSucc;
1032 }
1033
1034 static bool hasAddressTakenAndUsed(BasicBlock *BB) {
1035 if (!BB->hasAddressTaken()) return false;
1036
1037 // If the block has its address taken, it may be a tree of dead constants
1038 // hanging off of it. These shouldn't keep the block alive.
1039 BlockAddress *BA = BlockAddress::get(BB);
1040 BA->removeDeadConstantUsers();
1041 return !BA->use_empty();
1042 }
1043
1044 /// processBlock - If there are any predecessors whose control can be threaded
1045 /// through to a successor, transform them now.
1046 bool JumpThreadingPass::processBlock(BasicBlock *BB) {
1047 // If the block is trivially dead, just return and let the caller nuke it.
1048 // This simplifies other transformations.
1049 if (DTU->isBBPendingDeletion(BB) ||
1050 (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()))
1051 return false;
1052
1053 // If this block has a single predecessor, and if that pred has a single
1054 // successor, merge the blocks. This encourages recursive jump threading
1055 // because now the condition in this block can be threaded through
1056 // predecessors of our predecessor block.
1057 if (maybeMergeBasicBlockIntoOnlyPred(BB))
1058 return true;
1059
1060 if (tryToUnfoldSelectInCurrBB(BB))
1061 return true;
1062
1063 // Look if we can propagate guards to predecessors.
1064 if (HasGuards && processGuards(BB))
1065 return true;
1066
1067 // What kind of constant we're looking for.
1068 ConstantPreference Preference = WantInteger;
1069
1070 // Look to see if the terminator is a conditional branch, switch or indirect
1071 // branch; if not, we can't thread it.
1072 Value *Condition;
1073 Instruction *Terminator = BB->getTerminator();
1074 if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
1075 // Can't thread an unconditional jump.
1076 if (BI->isUnconditional()) return false;
1077 Condition = BI->getCondition();
1078 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
1079 Condition = SI->getCondition();
1080 } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
1081 // Can't thread indirect branch with no successors.
1082 if (IB->getNumSuccessors() == 0) return false;
1083 Condition = IB->getAddress()->stripPointerCasts();
1084 Preference = WantBlockAddress;
1085 } else {
1086 return false; // Must be an invoke or callbr.
1087 }
1088
1089 // Keep track of whether we constant folded the condition in this invocation.
1090 bool ConstantFolded = false;
1091
1092 // Run constant folding to see if we can reduce the condition to a simple
1093 // constant.
1094 if (Instruction *I = dyn_cast<Instruction>(Condition)) {
1095 Value *SimpleVal =
1096 ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
1097 if (SimpleVal) {
1098 I->replaceAllUsesWith(SimpleVal);
1099 if (isInstructionTriviallyDead(I, TLI))
1100 I->eraseFromParent();
1101 Condition = SimpleVal;
1102 ConstantFolded = true;
1103 }
1104 }
1105
1106 // If the terminator is branching on an undef or freeze undef, we can pick any
1107 // of the successors to branch to. Let getBestDestForJumpOnUndef decide.
1108 auto *FI = dyn_cast<FreezeInst>(Condition);
1109 if (isa<UndefValue>(Condition) ||
1110 (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) {
1111 unsigned BestSucc = getBestDestForJumpOnUndef(BB);
1112 std::vector<DominatorTree::UpdateType> Updates;
1113
1114 // Fold the branch/switch.
1115 Instruction *BBTerm = BB->getTerminator();
1116 Updates.reserve(BBTerm->getNumSuccessors());
1117 for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
1118 if (i == BestSucc) continue;
1119 BasicBlock *Succ = BBTerm->getSuccessor(i);
1120 Succ->removePredecessor(BB, true);
1121 Updates.push_back({DominatorTree::Delete, BB, Succ});
1122 }
1123
1124 LLVM_DEBUG(dbgs() << " In block '" << BB->getName()
1125 << "' folding undef terminator: " << *BBTerm << '\n');
1126 BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
1127 ++NumFolds;
1128 BBTerm->eraseFromParent();
1129 DTU->applyUpdatesPermissive(Updates);
1130 if (FI)
1131 FI->eraseFromParent();
1132 return true;
1133 }
1134
1135 // If the terminator of this block is branching on a constant, simplify the
1136 // terminator to an unconditional branch. This can occur due to threading in
1137 // other blocks.
1138 if (getKnownConstant(Condition, Preference)) {
1139 LLVM_DEBUG(dbgs() << " In block '" << BB->getName()
1140 << "' folding terminator: " << *BB->getTerminator()
1141 << '\n');
1142 ++NumFolds;
1143 ConstantFoldTerminator(BB, true, nullptr, DTU);
1144 if (HasProfileData)
1145 BPI->eraseBlock(BB);
1146 return true;
1147 }
1148
1149 Instruction *CondInst = dyn_cast<Instruction>(Condition);
1150
1151 // All the rest of our checks depend on the condition being an instruction.
1152 if (!CondInst) {
1153 // FIXME: Unify this with code below.
1154 if (processThreadableEdges(Condition, BB, Preference, Terminator))
1155 return true;
1156 return ConstantFolded;
1157 }
1158
1159 // Some of the following optimizations can safely work on the unfrozen cond.
1160 Value *CondWithoutFreeze = CondInst;
1161 if (auto *FI = dyn_cast<FreezeInst>(CondInst))
1162 CondWithoutFreeze = FI->getOperand(0);
1163
1164 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondWithoutFreeze)) {
1165 // If we're branching on a conditional, LVI might be able to determine
1166 // its value at the branch instruction. We only handle comparisons
1167 // against a constant at this time.
1168 if (Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1))) {
1169 LazyValueInfo::Tristate Ret =
1170 LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
1171 CondConst, BB->getTerminator(),
1172 /*UseBlockValue=*/false);
1173 if (Ret != LazyValueInfo::Unknown) {
1174 // We can safely replace *some* uses of the CondInst if it has
1175 // exactly one value as returned by LVI. RAUW is incorrect in the
1176 // presence of guards and assumes that use `Cond`. This
1177 // is because we use the guards/assumes to reason about the `Cond` value
1178 // at the end of the block, but RAUW unconditionally replaces all uses,
1179 // including the guards/assumes themselves and the uses before the
1180 // guard/assume.
1181 auto *CI = Ret == LazyValueInfo::True ?
1182 ConstantInt::getTrue(CondCmp->getType()) :
1183 ConstantInt::getFalse(CondCmp->getType());
1184 if (replaceFoldableUses(CondCmp, CI, BB))
1185 return true;
1186 }
1187
1188 // We did not manage to simplify this branch, try to see whether
1189 // CondCmp depends on a known phi-select pattern.
1190 if (tryToUnfoldSelect(CondCmp, BB))
1191 return true;
1192 }
1193 }
1194
1195 if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
1196 if (tryToUnfoldSelect(SI, BB))
1197 return true;
1198
1199 // Check for some cases that are worth simplifying. Right now we want to look
1200 // for loads that are used by a switch or by the condition for the branch. If
1201 // we see one, check to see if it's partially redundant. If so, insert a PHI
1202 // which can then be used to thread the values.
1203 Value *SimplifyValue = CondWithoutFreeze;
1204
1205 if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
1206 if (isa<Constant>(CondCmp->getOperand(1)))
1207 SimplifyValue = CondCmp->getOperand(0);
1208
1209 // TODO: There are other places where load PRE would be profitable, such as
1210 // more complex comparisons.
1211 if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue))
1212 if (simplifyPartiallyRedundantLoad(LoadI))
1213 return true;
1214
1215 // Before threading, try to propagate profile data backwards:
1216 if (PHINode *PN = dyn_cast<PHINode>(CondInst))
1217 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1218 updatePredecessorProfileMetadata(PN, BB);
1219
1220 // Handle a variety of cases where we are branching on something derived from
1221 // a PHI node in the current block. If we can prove that any predecessors
1222 // compute a predictable value based on a PHI node, thread those predecessors.
1223 if (processThreadableEdges(CondInst, BB, Preference, Terminator))
1224 return true;
1225
1226 // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in
1227 // the current block, see if we can simplify.
1228 PHINode *PN = dyn_cast<PHINode>(CondWithoutFreeze);
1229 if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1230 return processBranchOnPHI(PN);
1231
1232 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
1233 if (CondInst->getOpcode() == Instruction::Xor &&
1234 CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1235 return processBranchOnXOR(cast<BinaryOperator>(CondInst));
1236
1237 // Search for a stronger dominating condition that can be used to simplify a
1238 // conditional branch leaving BB.
1239 if (processImpliedCondition(BB))
1240 return true;
1241
1242 return false;
1243 }
1244
1245 bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) {
1246 auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
1247 if (!BI || !BI->isConditional())
1248 return false;
1249
1250 Value *Cond = BI->getCondition();
1251 // Assuming that predecessor's branch was taken, if pred's branch condition
1252 // (V) implies Cond, Cond can be either true, undef, or poison. In this case,
1253 // freeze(Cond) is either true or a nondeterministic value.
1254 // If freeze(Cond) has only one use, we can freely fold freeze(Cond) to true
1255 // without affecting other instructions.
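  // Illustrative case: if a single predecessor branched here on (x u< 10) and
  // Cond is (x u< 20), the implication holds, so the conditional branch in BB
  // can be folded to an unconditional branch to its 'true' successor.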
1256 auto *FICond = dyn_cast<FreezeInst>(Cond);
1257 if (FICond && FICond->hasOneUse())
1258 Cond = FICond->getOperand(0);
1259 else
1260 FICond = nullptr;
1261
1262 BasicBlock *CurrentBB = BB;
1263 BasicBlock *CurrentPred = BB->getSinglePredecessor();
1264 unsigned Iter = 0;
1265
1266 auto &DL = BB->getModule()->getDataLayout();
1267
1268 while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
1269 auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
1270 if (!PBI || !PBI->isConditional())
1271 return false;
1272 if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
1273 return false;
1274
1275 bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
1276 std::optional<bool> Implication =
1277 isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);
1278
1279 // If the branch conditions of BB (which is Cond) and CurrentPred are
1280 // freezes of the same value, Cond can be folded into CondIsTrue.
1281 if (!Implication && FICond && isa<FreezeInst>(PBI->getCondition())) {
1282 if (cast<FreezeInst>(PBI->getCondition())->getOperand(0) ==
1283 FICond->getOperand(0))
1284 Implication = CondIsTrue;
1285 }
1286
1287 if (Implication) {
1288 BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1);
1289 BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0);
1290 RemoveSucc->removePredecessor(BB);
1291 BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI);
1292 UncondBI->setDebugLoc(BI->getDebugLoc());
1293 ++NumFolds;
1294 BI->eraseFromParent();
1295 if (FICond)
1296 FICond->eraseFromParent();
1297
1298 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}});
1299 if (HasProfileData)
1300 BPI->eraseBlock(BB);
1301 return true;
1302 }
1303 CurrentBB = CurrentPred;
1304 CurrentPred = CurrentBB->getSinglePredecessor();
1305 }
1306
1307 return false;
1308 }
1309
1310 /// Return true if Op is an instruction defined in the given block.
1311 static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
1312 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1313 if (OpInst->getParent() == BB)
1314 return true;
1315 return false;
1316 }
1317
1318 /// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially
1319 /// redundant load instruction, eliminate it by replacing it with a PHI node.
1320 /// This is an important optimization that encourages jump threading, and needs
1321 /// to be run interlaced with other jump threading tasks.
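///
/// A minimal illustrative sketch (hypothetical names and types): if the
/// pointer's value is already available in one predecessor but not the other,
/// the load here becomes
///   %v.pr = load i32, ptr %p     ; reload inserted in the unavailable pred
///   %v    = phi i32 [ %avail, %pred1 ], [ %v.pr, %pred2 ]
/// so a branch on %v can then be threaded per predecessor.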
1322 bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
1323 // Don't hack volatile and ordered loads.
1324 if (!LoadI->isUnordered()) return false;
1325
1326 // If the load is defined in a block with exactly one predecessor, it can't be
1327 // partially redundant.
1328 BasicBlock *LoadBB = LoadI->getParent();
1329 if (LoadBB->getSinglePredecessor())
1330 return false;
1331
1332 // If the load is defined in an EH pad, it can't be partially redundant,
1333 // because the edges between the invoke and the EH pad cannot have other
1334 // instructions between them.
1335 if (LoadBB->isEHPad())
1336 return false;
1337
1338 Value *LoadedPtr = LoadI->getOperand(0);
1339
1340 // If the loaded operand is defined in the LoadBB and it's not a phi,
1341 // it can't be available in predecessors.
1342 if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
1343 return false;
1344
1345 // Scan a few instructions up from the load, to see if it is obviously live at
1346 // the entry to its block.
1347 BasicBlock::iterator BBIt(LoadI);
1348 bool IsLoadCSE;
1349 if (Value *AvailableVal = FindAvailableLoadedValue(
1350 LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
1351 // If the value of the load is locally available within the block, just use
1352 // it. This frequently occurs for reg2mem'd allocas.
1353
1354 if (IsLoadCSE) {
1355 LoadInst *NLoadI = cast<LoadInst>(AvailableVal);
1356 combineMetadataForCSE(NLoadI, LoadI, false);
1357 }
1358
1359 // If the returned value is the load itself, replace with poison. This can
1360 // only happen in dead loops.
1361 if (AvailableVal == LoadI)
1362 AvailableVal = PoisonValue::get(LoadI->getType());
1363 if (AvailableVal->getType() != LoadI->getType())
1364 AvailableVal = CastInst::CreateBitOrPointerCast(
1365 AvailableVal, LoadI->getType(), "", LoadI);
1366 LoadI->replaceAllUsesWith(AvailableVal);
1367 LoadI->eraseFromParent();
1368 return true;
1369 }
1370
1371 // Otherwise, if we scanned the whole block and got to the top of the block,
1372 // we know the block is locally transparent to the load. If not, something
1373 // might clobber its value.
1374 if (BBIt != LoadBB->begin())
1375 return false;
1376
1377 // If all of the loads and stores that feed the value have the same AA tags,
1378 // then we can propagate them onto any newly inserted loads.
1379 AAMDNodes AATags = LoadI->getAAMetadata();
1380
1381 SmallPtrSet<BasicBlock*, 8> PredsScanned;
1382
1383 using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>;
1384
1385 AvailablePredsTy AvailablePreds;
1386 BasicBlock *OneUnavailablePred = nullptr;
1387 SmallVector<LoadInst*, 8> CSELoads;
1388
1389 // If we got here, the loaded value is transparent through to the start of the
1390 // block. Check to see if it is available in any of the predecessor blocks.
1391 for (BasicBlock *PredBB : predecessors(LoadBB)) {
1392 // If we already scanned this predecessor, skip it.
1393 if (!PredsScanned.insert(PredBB).second)
1394 continue;
1395
1396 BBIt = PredBB->end();
1397 unsigned NumScanedInst = 0;
1398 Value *PredAvailable = nullptr;
1399 // NOTE: We don't CSE loads that are volatile or anything stronger than
1400 // unordered; that should have been checked when we entered the function.
1401 assert(LoadI->isUnordered() &&
1402 "Attempting to CSE volatile or atomic loads");
1403 // If this is a load on a phi pointer, phi-translate it and search
1404 // for available load/store to the pointer in predecessors.
1405 Type *AccessTy = LoadI->getType();
1406 const auto &DL = LoadI->getModule()->getDataLayout();
1407 MemoryLocation Loc(LoadedPtr->DoPHITranslation(LoadBB, PredBB),
1408 LocationSize::precise(DL.getTypeStoreSize(AccessTy)),
1409 AATags);
1410 PredAvailable = findAvailablePtrLoadStore(Loc, AccessTy, LoadI->isAtomic(),
1411 PredBB, BBIt, DefMaxInstsToScan,
1412 AA, &IsLoadCSE, &NumScanedInst);
1413
1414 // If PredBB has a single predecessor, continue scanning through the
1415 // single predecessor.
1416 BasicBlock *SinglePredBB = PredBB;
1417 while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
1418 NumScanedInst < DefMaxInstsToScan) {
1419 SinglePredBB = SinglePredBB->getSinglePredecessor();
1420 if (SinglePredBB) {
1421 BBIt = SinglePredBB->end();
1422 PredAvailable = findAvailablePtrLoadStore(
1423 Loc, AccessTy, LoadI->isAtomic(), SinglePredBB, BBIt,
1424 (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
1425 &NumScanedInst);
1426 }
1427 }
1428
1429 if (!PredAvailable) {
1430 OneUnavailablePred = PredBB;
1431 continue;
1432 }
1433
1434 if (IsLoadCSE)
1435 CSELoads.push_back(cast<LoadInst>(PredAvailable));
1436
1437 // If so, this load is partially redundant. Remember this info so that we
1438 // can create a PHI node.
1439 AvailablePreds.emplace_back(PredBB, PredAvailable);
1440 }
1441
1442 // If the loaded value isn't available in any predecessor, it isn't partially
1443 // redundant.
1444 if (AvailablePreds.empty()) return false;
1445
1446 // Okay, the loaded value is available in at least one (and maybe all!)
1447 // predecessors. If the value is unavailable in more than one unique
1448 // predecessor, we want to insert a merge block for those common predecessors.
1449 // This ensures that we only have to insert one reload, thus not increasing
1450 // code size.
1451 BasicBlock *UnavailablePred = nullptr;
1452
1453 // If the value is unavailable in one of the predecessors, we will end up
1454 // inserting a new instruction into them. It is only valid if all the
1455 // instructions before LoadI are guaranteed to pass execution to its
1456 // successor, or if LoadI is safe to speculate.
1457 // TODO: If this logic becomes more complex, and we will perform PRE insertion
1458 // farther than to a predecessor, we need to reuse the code from GVN's PRE.
1459 // It requires domination tree analysis, so for this simple case it is an
1460 // overkill.
1461 if (PredsScanned.size() != AvailablePreds.size() &&
1462 !isSafeToSpeculativelyExecute(LoadI))
1463 for (auto I = LoadBB->begin(); &*I != LoadI; ++I)
1464 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
1465 return false;
1466
1467 // If there is exactly one predecessor where the value is unavailable, the
1468 // already computed 'OneUnavailablePred' block is it. If it ends in an
1469 // unconditional branch, we know that it isn't a critical edge.
1470 if (PredsScanned.size() == AvailablePreds.size()+1 &&
1471 OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
1472 UnavailablePred = OneUnavailablePred;
1473 } else if (PredsScanned.size() != AvailablePreds.size()) {
1474 // Otherwise, we had multiple unavailable predecessors or we had a critical
1475 // edge from the one.
1476 SmallVector<BasicBlock*, 8> PredsToSplit;
1477 SmallPtrSet<BasicBlock*, 8> AvailablePredSet;
1478
1479 for (const auto &AvailablePred : AvailablePreds)
1480 AvailablePredSet.insert(AvailablePred.first);
1481
1482 // Add all the unavailable predecessors to the PredsToSplit list.
1483 for (BasicBlock *P : predecessors(LoadBB)) {
1484 // If the predecessor is an indirect goto, we can't split the edge.
1485 if (isa<IndirectBrInst>(P->getTerminator()))
1486 return false;
1487
1488 if (!AvailablePredSet.count(P))
1489 PredsToSplit.push_back(P);
1490 }
1491
1492 // Split them out to their own block.
1493 UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
1494 }
1495
1496 // If the value isn't available in all predecessors, then there will be
1497 // exactly one where it isn't available. Insert a load on that edge and add
1498 // it to the AvailablePreds list.
1499 if (UnavailablePred) {
1500 assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
1501 "Can't handle critical edge here!");
1502 LoadInst *NewVal = new LoadInst(
1503 LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
1504 LoadI->getName() + ".pr", false, LoadI->getAlign(),
1505 LoadI->getOrdering(), LoadI->getSyncScopeID(),
1506 UnavailablePred->getTerminator());
1507 NewVal->setDebugLoc(LoadI->getDebugLoc());
1508 if (AATags)
1509 NewVal->setAAMetadata(AATags);
1510
1511 AvailablePreds.emplace_back(UnavailablePred, NewVal);
1512 }
1513
1514 // Now we know that each predecessor of this block has a value in
1515 // AvailablePreds, sort them for efficient access as we're walking the preds.
1516 array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());
1517
1518 // Create a PHI node at the start of the block for the PRE'd load value.
1519 pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
1520 PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), "",
1521 &LoadBB->front());
1522 PN->takeName(LoadI);
1523 PN->setDebugLoc(LoadI->getDebugLoc());
1524
1525 // Insert new entries into the PHI for each predecessor. A single block may
1526 // have multiple entries here.
1527 for (pred_iterator PI = PB; PI != PE; ++PI) {
1528 BasicBlock *P = *PI;
1529 AvailablePredsTy::iterator I =
1530 llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr));
1531
1532 assert(I != AvailablePreds.end() && I->first == P &&
1533 "Didn't find entry for predecessor!");
1534
1535 // If we have an available predecessor but it requires casting, insert the
1536 // cast in the predecessor and use the cast. Note that we have to update the
1537 // AvailablePreds vector as we go so that all of the PHI entries for this
1538 // predecessor use the same bitcast.
1539 Value *&PredV = I->second;
1540 if (PredV->getType() != LoadI->getType())
1541 PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "",
1542 P->getTerminator());
1543
1544 PN->addIncoming(PredV, I->first);
1545 }
1546
1547 for (LoadInst *PredLoadI : CSELoads) {
1548 combineMetadataForCSE(PredLoadI, LoadI, true);
1549 }
1550
1551 LoadI->replaceAllUsesWith(PN);
1552 LoadI->eraseFromParent();
1553
1554 return true;
1555 }
1556
1557 /// findMostPopularDest - The specified list contains multiple possible
1558 /// threadable destinations. Pick the one that occurs the most frequently in
1559 /// the list.
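/// For example (illustrative), given PredToDestList = {(P1, D1), (P2, D2),
/// (P3, D1)}, D1 is returned. Null (undef) destinations are counted as zero,
/// so nullptr is returned only when every entry maps to nullptr.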
1560 static BasicBlock *
1561 findMostPopularDest(BasicBlock *BB,
1562 const SmallVectorImpl<std::pair<BasicBlock *,
1563 BasicBlock *>> &PredToDestList) {
1564 assert(!PredToDestList.empty());
1565
1566 // Determine popularity. If there are multiple possible destinations, we
1567 // explicitly choose to ignore 'undef' destinations. We prefer to thread
1568 // blocks with known and real destinations to threading undef. We'll handle
1569 // them later if interesting.
1570 MapVector<BasicBlock *, unsigned> DestPopularity;
1571
1572 // Populate DestPopularity with the successors in the order they appear in the
1573 // successor list. This way, we ensure determinism by iterating it in the
1574 // same order in std::max_element below. We map nullptr to 0 so that we can
1575 // return nullptr when PredToDestList contains nullptr only.
1576 DestPopularity[nullptr] = 0;
1577 for (auto *SuccBB : successors(BB))
1578 DestPopularity[SuccBB] = 0;
1579
1580 for (const auto &PredToDest : PredToDestList)
1581 if (PredToDest.second)
1582 DestPopularity[PredToDest.second]++;
1583
1584 // Find the most popular dest.
1585 auto MostPopular = std::max_element(
1586 DestPopularity.begin(), DestPopularity.end(), llvm::less_second());
1587
1588 // Okay, we have finally picked the most popular destination.
1589 return MostPopular->first;
1590 }
1591
1592 // Try to evaluate the value of V when the control flows from PredPredBB to
1593 // BB->getSinglePredecessor() and then on to BB.
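// For example (an illustrative IR sketch; names are hypothetical):
//
//   PredPredBB: ...
//               br label %PredBB
//   PredBB:     %p = phi i1 [ true, %PredPredBB ], [ %x, %other ]
//               br i1 %c, label %BB, label %else
//   BB:         br i1 %p, ...
//
// Evaluating %p on the edge PredPredBB -> PredBB yields the constant 'true'.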
1594 Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB,
1595 BasicBlock *PredPredBB,
1596 Value *V) {
1597 BasicBlock *PredBB = BB->getSinglePredecessor();
1598 assert(PredBB && "Expected a single predecessor");
1599
1600 if (Constant *Cst = dyn_cast<Constant>(V)) {
1601 return Cst;
1602 }
1603
1604 // Consult LVI if V is not an instruction in BB or PredBB.
1605 Instruction *I = dyn_cast<Instruction>(V);
1606 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) {
1607 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr);
1608 }
1609
1610 // Look into a PHI argument.
1611 if (PHINode *PHI = dyn_cast<PHINode>(V)) {
1612 if (PHI->getParent() == PredBB)
1613 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB));
1614 return nullptr;
1615 }
1616
1617 // If we have a CmpInst, try to fold it for each incoming edge into PredBB.
1618 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) {
1619 if (CondCmp->getParent() == BB) {
1620 Constant *Op0 =
1621 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0));
1622 Constant *Op1 =
1623 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1));
1624 if (Op0 && Op1) {
1625 return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1);
1626 }
1627 }
1628 return nullptr;
1629 }
1630
1631 return nullptr;
1632 }
1633
1634 bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB,
1635 ConstantPreference Preference,
1636 Instruction *CxtI) {
1637 // If threading this would thread across a loop header, don't even try to
1638 // thread the edge.
1639 if (LoopHeaders.count(BB))
1640 return false;
1641
1642 PredValueInfoTy PredValues;
1643 if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference,
1644 CxtI)) {
1645 // We don't have known values in predecessors. See if we can thread through
1646 // BB and its sole predecessor.
1647 return maybethreadThroughTwoBasicBlocks(BB, Cond);
1648 }
1649
1650 assert(!PredValues.empty() &&
1651 "computeValueKnownInPredecessors returned true with no values");
1652
1653 LLVM_DEBUG(dbgs() << "IN BB: " << *BB;
1654 for (const auto &PredValue : PredValues) {
1655 dbgs() << " BB '" << BB->getName()
1656 << "': FOUND condition = " << *PredValue.first
1657 << " for pred '" << PredValue.second->getName() << "'.\n";
1658 });
1659
1660 // Decide what we want to thread through. Convert our list of known values to
1661 // a list of known destinations for each pred. This also discards duplicate
1662 // predecessors and keeps track of the undefined inputs (which are represented
1663 // as a null dest in the PredToDestList).
1664 SmallPtrSet<BasicBlock*, 16> SeenPreds;
1665 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1666
1667 BasicBlock *OnlyDest = nullptr;
1668 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1669 Constant *OnlyVal = nullptr;
1670 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1671
1672 for (const auto &PredValue : PredValues) {
1673 BasicBlock *Pred = PredValue.second;
1674 if (!SeenPreds.insert(Pred).second)
1675 continue; // Duplicate predecessor entry.
1676
1677 Constant *Val = PredValue.first;
1678
1679 BasicBlock *DestBB;
1680 if (isa<UndefValue>(Val))
1681 DestBB = nullptr;
1682 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1683 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1684 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1685 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1686 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1687 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1688 } else {
1689 assert(isa<IndirectBrInst>(BB->getTerminator())
1690 && "Unexpected terminator");
1691 assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1692 DestBB = cast<BlockAddress>(Val)->getBasicBlock();
1693 }
1694
1695 // If we have exactly one destination, remember it for efficiency below.
1696 if (PredToDestList.empty()) {
1697 OnlyDest = DestBB;
1698 OnlyVal = Val;
1699 } else {
1700 if (OnlyDest != DestBB)
1701 OnlyDest = MultipleDestSentinel;
1702 // It is possible that we have the same destination but a different value,
1703 // e.g. the default case of a switch.
1704 if (Val != OnlyVal)
1705 OnlyVal = MultipleVal;
1706 }
1707
1708 // If the predecessor ends with an indirect goto, we can't change its
1709 // destination.
1710 if (isa<IndirectBrInst>(Pred->getTerminator()))
1711 continue;
1712
1713 PredToDestList.emplace_back(Pred, DestBB);
1714 }
1715
1716 // If all edges were unthreadable, we fail.
1717 if (PredToDestList.empty())
1718 return false;
1719
1720 // If all the predecessors go to a single known successor, we want to fold,
1721 // not thread. By doing so, we do not need to duplicate the current block, and
1722 // we also avoid missing potential opportunities in case we don't/can't duplicate.
1723 if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1724 if (BB->hasNPredecessors(PredToDestList.size())) {
1725 bool SeenFirstBranchToOnlyDest = false;
1726 std::vector <DominatorTree::UpdateType> Updates;
1727 Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
1728 for (BasicBlock *SuccBB : successors(BB)) {
1729 if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
1730 SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
1731 } else {
1732 SuccBB->removePredecessor(BB, true); // This successor is unreachable.
1733 Updates.push_back({DominatorTree::Delete, BB, SuccBB});
1734 }
1735 }
1736
1737 // Finally update the terminator.
1738 Instruction *Term = BB->getTerminator();
1739 BranchInst::Create(OnlyDest, Term);
1740 ++NumFolds;
1741 Term->eraseFromParent();
1742 DTU->applyUpdatesPermissive(Updates);
1743 if (HasProfileData)
1744 BPI->eraseBlock(BB);
1745
1746 // If the condition is now dead due to the removal of the old terminator,
1747 // erase it.
1748 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
1749 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
1750 CondInst->eraseFromParent();
1751 // We can safely replace *some* uses of the CondInst if it has
1752 // exactly one value as returned by LVI. RAUW is incorrect in the
1753 // presence of guards and assumes, that have the `Cond` as the use. This
1754 // is because we use the guards/assume to reason about the `Cond` value
1755 // at the end of block, but RAUW unconditionally replaces all uses
1756 // including the guards/assumes themselves and the uses before the
1757 // guard/assume.
1758 else if (OnlyVal && OnlyVal != MultipleVal)
1759 replaceFoldableUses(CondInst, OnlyVal, BB);
1760 }
1761 return true;
1762 }
1763 }
1764
1765 // Determine which is the most common successor. If we have many inputs and
1766 // this block is a switch, we want to start by threading the batch that goes
1767 // to the most popular destination first. If we only know about one
1768 // threadable destination (the common case) we can avoid this.
1769 BasicBlock *MostPopularDest = OnlyDest;
1770
1771 if (MostPopularDest == MultipleDestSentinel) {
1772 // Remove any loop headers from the Dest list; threadEdge conservatively
1773 // won't process them, but we might have other destinations that are
1774 // eligible and that we still want to process.
1775 erase_if(PredToDestList,
1776 [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
1777 return LoopHeaders.contains(PredToDest.second);
1778 });
1779
1780 if (PredToDestList.empty())
1781 return false;
1782
1783 MostPopularDest = findMostPopularDest(BB, PredToDestList);
1784 }
1785
1786 // Now that we know what the most popular destination is, factor all
1787 // predecessors that will jump to it into a single predecessor.
1788 SmallVector<BasicBlock*, 16> PredsToFactor;
1789 for (const auto &PredToDest : PredToDestList)
1790 if (PredToDest.second == MostPopularDest) {
1791 BasicBlock *Pred = PredToDest.first;
1792
1793 // This predecessor may be a switch or something else that has multiple
1794 // edges to the block. Factor each of these edges by listing them
1795 // according to # occurrences in PredsToFactor.
1796 for (BasicBlock *Succ : successors(Pred))
1797 if (Succ == BB)
1798 PredsToFactor.push_back(Pred);
1799 }
1800
1801 // If the threadable edges are branching on an undefined value, we get to pick
1802 // the destination that these predecessors should get to.
1803 if (!MostPopularDest)
1804 MostPopularDest = BB->getTerminator()->
1805 getSuccessor(getBestDestForJumpOnUndef(BB));
1806
1807 // Ok, try to thread it!
1808 return tryThreadEdge(BB, PredsToFactor, MostPopularDest);
1809 }
1810
1811 /// processBranchOnPHI - We have an otherwise unthreadable conditional branch on
1812 /// a PHI node (or freeze PHI) in the current block. See if there are any
1813 /// simplifications we can do based on inputs to the phi node.
1814 bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) {
1815 BasicBlock *BB = PN->getParent();
1816
1817 // TODO: We could make use of this to do it once for blocks with common PHI
1818 // values.
1819 SmallVector<BasicBlock*, 1> PredBBs;
1820 PredBBs.resize(1);
1821
1822 // If any of the predecessor blocks end in an unconditional branch, we can
1823 // *duplicate* the conditional branch into that block in order to further
1824 // encourage jump threading and to eliminate cases where we have branch on a
1825 // phi of an icmp (branch on icmp is much better).
1826 // This is still beneficial when a frozen phi is used as the branch condition
1827 // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp))
1828 // to br(icmp(freeze ...)).
1829 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1830 BasicBlock *PredBB = PN->getIncomingBlock(i);
1831 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
1832 if (PredBr->isUnconditional()) {
1833 PredBBs[0] = PredBB;
1834 // Try to duplicate BB into PredBB.
1835 if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs))
1836 return true;
1837 }
1838 }
1839
1840 return false;
1841 }
1842
1843 /// processBranchOnXOR - We have an otherwise unthreadable conditional branch on
1844 /// a xor instruction in the current block. See if there are any
1845 /// simplifications we can do based on inputs to the xor.
1846 bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) {
1847 BasicBlock *BB = BO->getParent();
1848
1849 // If either the LHS or RHS of the xor is a constant, don't do this
1850 // optimization.
1851 if (isa<ConstantInt>(BO->getOperand(0)) ||
1852 isa<ConstantInt>(BO->getOperand(1)))
1853 return false;
1854
1855 // If the first instruction in BB isn't a phi, we won't be able to infer
1856 // anything special about any particular predecessor.
1857 if (!isa<PHINode>(BB->front()))
1858 return false;
1859
1860 // If this BB is a landing pad, we won't be able to split the edge into it.
1861 if (BB->isEHPad())
1862 return false;
1863
1864 // If we have a xor as the branch input to this block, and we know that the
1865 // LHS or RHS of the xor in any predecessor is true/false, then we can clone
1866 // the condition into the predecessor and fix that value to true, saving some
1867 // logical ops on that path and encouraging other paths to simplify.
1868 //
1869 // This copies something like this:
1870 //
1871 // BB:
1872 // %X = phi i1 [1], [%X']
1873 // %Y = icmp eq i32 %A, %B
1874 // %Z = xor i1 %X, %Y
1875 // br i1 %Z, ...
1876 //
1877 // Into:
1878 // BB':
1879 // %Y = icmp ne i32 %A, %B
1880 // br i1 %Y, ...
1881
1882 PredValueInfoTy XorOpValues;
1883 bool isLHS = true;
1884 if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
1885 WantInteger, BO)) {
1886 assert(XorOpValues.empty());
1887 if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
1888 WantInteger, BO))
1889 return false;
1890 isLHS = false;
1891 }
1892
1893 assert(!XorOpValues.empty() &&
1894 "computeValueKnownInPredecessors returned true with no values");
1895
1896 // Scan the information to see which is most popular: true or false. The
1897 // predecessors can be of the set true, false, or undef.
1898 unsigned NumTrue = 0, NumFalse = 0;
1899 for (const auto &XorOpValue : XorOpValues) {
1900 if (isa<UndefValue>(XorOpValue.first))
1901 // Ignore undefs for the count.
1902 continue;
1903 if (cast<ConstantInt>(XorOpValue.first)->isZero())
1904 ++NumFalse;
1905 else
1906 ++NumTrue;
1907 }
1908
1909 // Determine which value to split on, true, false, or undef if neither.
1910 ConstantInt *SplitVal = nullptr;
1911 if (NumTrue > NumFalse)
1912 SplitVal = ConstantInt::getTrue(BB->getContext());
1913 else if (NumTrue != 0 || NumFalse != 0)
1914 SplitVal = ConstantInt::getFalse(BB->getContext());
1915
1916 // Collect all of the blocks that this can be folded into so that we can
1917 // factor this once and clone it once.
1918 SmallVector<BasicBlock*, 8> BlocksToFoldInto;
1919 for (const auto &XorOpValue : XorOpValues) {
1920 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
1921 continue;
1922
1923 BlocksToFoldInto.push_back(XorOpValue.second);
1924 }
1925
1926 // If we inferred a value for all of the predecessors, then duplication won't
1927 // help us. However, we can just replace the LHS or RHS with the constant.
1928 if (BlocksToFoldInto.size() ==
1929 cast<PHINode>(BB->front()).getNumIncomingValues()) {
1930 if (!SplitVal) {
1931 // If all preds provide undef, just nuke the xor, because it is undef too.
1932 BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
1933 BO->eraseFromParent();
1934 } else if (SplitVal->isZero() && BO != BO->getOperand(isLHS)) {
1935 // If all preds provide 0, replace the xor with the other input.
1936 BO->replaceAllUsesWith(BO->getOperand(isLHS));
1937 BO->eraseFromParent();
1938 } else {
1939 // If all preds provide 1, set the computed value to 1.
1940 BO->setOperand(!isLHS, SplitVal);
1941 }
1942
1943 return true;
1944 }
1945
1946 // If any of the predecessors ends with an indirect goto, we can't change
1947 // that predecessor's destination.
1948 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) {
1949 return isa<IndirectBrInst>(Pred->getTerminator());
1950 }))
1951 return false;
1952
1953 // Try to duplicate BB into PredBB.
1954 return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
1955 }
1956
1957 /// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
1958 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for
1959 /// NewPred using the entries from OldPred (suitably mapped).
1960 static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
1961 BasicBlock *OldPred,
1962 BasicBlock *NewPred,
1963 DenseMap<Instruction*, Value*> &ValueMap) {
1964 for (PHINode &PN : PHIBB->phis()) {
1965 // Ok, we have a PHI node. Figure out what the incoming value was for the
1966 // DestBlock.
1967 Value *IV = PN.getIncomingValueForBlock(OldPred);
1968
1969 // Remap the value if necessary.
1970 if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
1971 DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
1972 if (I != ValueMap.end())
1973 IV = I->second;
1974 }
1975
1976 PN.addIncoming(IV, NewPred);
1977 }
1978 }
1979
1980 /// Merge basic block BB into its sole predecessor if possible.
1981 bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) {
1982 BasicBlock *SinglePred = BB->getSinglePredecessor();
1983 if (!SinglePred)
1984 return false;
1985
1986 const Instruction *TI = SinglePred->getTerminator();
1987 if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 ||
1988 SinglePred == BB || hasAddressTakenAndUsed(BB))
1989 return false;
1990
1991 // If SinglePred was a loop header, BB becomes one.
1992 if (LoopHeaders.erase(SinglePred))
1993 LoopHeaders.insert(BB);
1994
1995 LVI->eraseBlock(SinglePred);
1996 MergeBasicBlockIntoOnlyPred(BB, DTU);
1997
1998 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by
1999 // BB code within one basic block `BB`), we need to invalidate the LVI
2000 // information associated with BB, because the LVI information need not be
2001 // true for all of BB after the merge. For example,
2002 // Before the merge, LVI info and code is as follows:
2003 // SinglePred: <LVI info1 for %p val>
2004 // %y = use of %p
2005 // call @exit() // need not transfer execution to successor.
2006 // assume(%p) // from this point on %p is true
2007 // br label %BB
2008 // BB: <LVI info2 for %p val, i.e. %p is true>
2009 // %x = use of %p
2010 // br label exit
2011 //
2012 // Note that this LVI info for blocks BB and SinglePred is correct for %p
2013 // (info2 and info1 respectively). After the merge and the deletion of the
2014 // LVI info1 for SinglePred, we have the following code:
2015 // BB: <LVI info2 for %p val>
2016 // %y = use of %p
2017 // call @exit()
2018 // assume(%p)
2019 // %x = use of %p <-- LVI info2 is correct from here onwards.
2020 // br label exit
2021 // LVI info2 for BB is incorrect at the beginning of BB.
2022
2023 // Invalidate LVI information for BB if the LVI is not provably true for
2024 // all of BB.
2025 if (!isGuaranteedToTransferExecutionToSuccessor(BB))
2026 LVI->eraseBlock(BB);
2027 return true;
2028 }
2029
2030 /// Update the SSA form. NewBB contains instructions that are copied from BB.
2031 /// ValueMapping maps old values in BB to new ones in NewBB.
2032 void JumpThreadingPass::updateSSA(
2033 BasicBlock *BB, BasicBlock *NewBB,
2034 DenseMap<Instruction *, Value *> &ValueMapping) {
2035 // If there were values defined in BB that are used outside the block, then we
2036 // now have to update all uses of the value to use either the original value,
2037 // the cloned value, or some PHI derived value. This can require arbitrary
2038 // PHI insertion, which we are prepared to do; clean these up now.
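// For example (illustrative): if %v is defined in BB and used in a later
// block, then after cloning there are two definitions (%v in BB and its clone
// in NewBB); SSAUpdater inserts a PHI at the join and rewrites the
// out-of-block use to refer to that PHI.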
2039 SSAUpdater SSAUpdate;
2040 SmallVector<Use *, 16> UsesToRename;
2041
2042 for (Instruction &I : *BB) {
2043 // Scan all uses of this instruction to see if it is used outside of its
2044 // block, and if so, record them in UsesToRename.
2045 for (Use &U : I.uses()) {
2046 Instruction *User = cast<Instruction>(U.getUser());
2047 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
2048 if (UserPN->getIncomingBlock(U) == BB)
2049 continue;
2050 } else if (User->getParent() == BB)
2051 continue;
2052
2053 UsesToRename.push_back(&U);
2054 }
2055
2056 // If there are no uses outside the block, we're done with this instruction.
2057 if (UsesToRename.empty())
2058 continue;
2059 LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
2060
2061 // We found a use of I outside of BB. Rename all uses of I that are outside
2062 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
2063 // with the two values we know.
2064 SSAUpdate.Initialize(I.getType(), I.getName());
2065 SSAUpdate.AddAvailableValue(BB, &I);
2066 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
2067
2068 while (!UsesToRename.empty())
2069 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
2070 LLVM_DEBUG(dbgs() << "\n");
2071 }
2072 }
2073
2074 /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone
2075 /// arguments that come from PredBB. Return the map from the variables in the
2076 /// source basic block to the variables in the newly created basic block.
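/// For example (illustrative), threadEdge() below calls this as
/// cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB) to copy
/// everything except the terminator, and then feeds the returned map to
/// addPHINodeEntriesForMappedBlock() and updateSSA().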
2077 DenseMap<Instruction *, Value *>
2078 JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI,
2079 BasicBlock::iterator BE, BasicBlock *NewBB,
2080 BasicBlock *PredBB) {
2081 // We are going to have to map operands from the source basic block to the new
2082 // copy of the block 'NewBB'. If there are PHI nodes in the source basic
2083 // block, evaluate them to account for entry from PredBB.
2084 DenseMap<Instruction *, Value *> ValueMapping;
2085
2086 // Retargets llvm.dbg.value to any renamed variables.
2087 auto RetargetDbgValueIfPossible = [&](Instruction *NewInst) -> bool {
2088 auto DbgInstruction = dyn_cast<DbgValueInst>(NewInst);
2089 if (!DbgInstruction)
2090 return false;
2091
2092 SmallSet<std::pair<Value *, Value *>, 16> OperandsToRemap;
2093 for (auto DbgOperand : DbgInstruction->location_ops()) {
2094 auto DbgOperandInstruction = dyn_cast<Instruction>(DbgOperand);
2095 if (!DbgOperandInstruction)
2096 continue;
2097
2098 auto I = ValueMapping.find(DbgOperandInstruction);
2099 if (I != ValueMapping.end()) {
2100 OperandsToRemap.insert(
2101 std::pair<Value *, Value *>(DbgOperand, I->second));
2102 }
2103 }
2104
2105 for (auto &[OldOp, MappedOp] : OperandsToRemap)
2106 DbgInstruction->replaceVariableLocationOp(OldOp, MappedOp);
2107 return true;
2108 };
2109
2110 // Clone the phi nodes of the source basic block into NewBB. The resulting
2111 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater
2112 // might need to rewrite the operand of the cloned phi.
2113 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2114 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB);
2115 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB);
2116 ValueMapping[PN] = NewPN;
2117 }
2118
2119 // Clone noalias scope declarations in the threaded block. When threading a
2120 // loop exit, we would otherwise end up with two identical scope declarations
2121 // visible at the same time.
2122 SmallVector<MDNode *> NoAliasScopes;
2123 DenseMap<MDNode *, MDNode *> ClonedScopes;
2124 LLVMContext &Context = PredBB->getContext();
2125 identifyNoAliasScopesToClone(BI, BE, NoAliasScopes);
2126 cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context);
2127
2128 // Clone the non-phi instructions of the source basic block into NewBB,
2129 // keeping track of the mapping and using it to remap operands in the cloned
2130 // instructions.
2131 for (; BI != BE; ++BI) {
2132 Instruction *New = BI->clone();
2133 New->setName(BI->getName());
2134 New->insertInto(NewBB, NewBB->end());
2135 ValueMapping[&*BI] = New;
2136 adaptNoAliasScopes(New, ClonedScopes, Context);
2137
2138 if (RetargetDbgValueIfPossible(New))
2139 continue;
2140
2141 // Remap operands to patch up intra-block references.
2142 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2143 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
2144 DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst);
2145 if (I != ValueMapping.end())
2146 New->setOperand(i, I->second);
2147 }
2148 }
2149
2150 return ValueMapping;
2151 }
2152
2153 /// Attempt to thread through two successive basic blocks.
2154 bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB,
2155 Value *Cond) {
2156 // Consider:
2157 //
2158 // PredBB:
2159 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ]
2160 // %tobool = icmp eq i32 %cond, 0
2161 // br i1 %tobool, label %BB, label ...
2162 //
2163 // BB:
2164 // %cmp = icmp eq i32* %var, null
2165 // br i1 %cmp, label ..., label ...
2166 //
2167 // We don't know the value of %var at BB even if we know which incoming edge
2168 // we take to BB. However, once we duplicate PredBB for each of its incoming
2169 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of
2170 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB.
2171
2172 // Require that BB end with a Branch for simplicity.
2173 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
2174 if (!CondBr)
2175 return false;
2176
2177 // BB must have exactly one predecessor.
2178 BasicBlock *PredBB = BB->getSinglePredecessor();
2179 if (!PredBB)
2180 return false;
2181
2182 // Require that PredBB end with a conditional Branch. If PredBB ends with an
2183 // unconditional branch, we should be merging PredBB and BB instead. For
2184 // simplicity, we don't deal with a switch.
2185 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
2186 if (!PredBBBranch || PredBBBranch->isUnconditional())
2187 return false;
2188
2189 // If PredBB has exactly one incoming edge, we don't gain anything by copying
2190 // PredBB.
2191 if (PredBB->getSinglePredecessor())
2192 return false;
2193
2194 // Don't thread through PredBB if it contains a successor edge to itself, in
2195 // which case we would infinite loop. Suppose we are threading an edge from
2196 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a
2197 // successor edge to itself. If we allowed jump threading in this case, we
2198 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since
2199 // PredBB.thread has a successor edge to PredBB, we would immediately come up
2200 // with another jump threading opportunity from PredBB.thread through PredBB
2201 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we
2202 // would keep peeling one iteration from PredBB.
2203 if (llvm::is_contained(successors(PredBB), PredBB))
2204 return false;
2205
2206 // Don't thread across a loop header.
2207 if (LoopHeaders.count(PredBB))
2208 return false;
2209
2210 // Avoid complication with duplicating EH pads.
2211 if (PredBB->isEHPad())
2212 return false;
2213
2214 // Find a predecessor that we can thread. For simplicity, we only consider a
2215 // successor edge out of BB to which we thread exactly one incoming edge into
2216 // PredBB.
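// For example (illustrative): if Cond evaluates to 0 along exactly one
// PredPredBB -> PredBB edge (ZeroCount == 1), only that edge is threaded, and
// it is sent to CondBr's false successor.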
2217 unsigned ZeroCount = 0;
2218 unsigned OneCount = 0;
2219 BasicBlock *ZeroPred = nullptr;
2220 BasicBlock *OnePred = nullptr;
2221 for (BasicBlock *P : predecessors(PredBB)) {
2222 // If PredPred ends with IndirectBrInst, we can't handle it.
2223 if (isa<IndirectBrInst>(P->getTerminator()))
2224 continue;
2225 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(
2226 evaluateOnPredecessorEdge(BB, P, Cond))) {
2227 if (CI->isZero()) {
2228 ZeroCount++;
2229 ZeroPred = P;
2230 } else if (CI->isOne()) {
2231 OneCount++;
2232 OnePred = P;
2233 }
2234 }
2235 }
2236
2237 // Disregard complicated cases where we have to thread multiple edges.
2238 BasicBlock *PredPredBB;
2239 if (ZeroCount == 1) {
2240 PredPredBB = ZeroPred;
2241 } else if (OneCount == 1) {
2242 PredPredBB = OnePred;
2243 } else {
2244 return false;
2245 }
2246
2247 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred);
2248
2249 // If threading to the same block as we come from, we would infinite loop.
2250 if (SuccBB == BB) {
2251 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
2252 << "' - would thread to self!\n");
2253 return false;
2254 }
2255
2256 // If threading this would thread across a loop header, don't thread the edge.
2257 // See the comments above findLoopHeaders for justifications and caveats.
2258 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2259 LLVM_DEBUG({
2260 bool BBIsHeader = LoopHeaders.count(BB);
2261 bool SuccIsHeader = LoopHeaders.count(SuccBB);
2262 dbgs() << " Not threading across "
2263 << (BBIsHeader ? "loop header BB '" : "block BB '")
2264 << BB->getName() << "' to dest "
2265 << (SuccIsHeader ? "loop header BB '" : "block BB '")
2266 << SuccBB->getName()
2267 << "' - it might create an irreducible loop!\n";
2268 });
2269 return false;
2270 }
2271
2272 // Compute the cost of duplicating BB and PredBB.
2273 unsigned BBCost = getJumpThreadDuplicationCost(
2274 TTI, BB, BB->getTerminator(), BBDupThreshold);
2275 unsigned PredBBCost = getJumpThreadDuplicationCost(
2276 TTI, PredBB, PredBB->getTerminator(), BBDupThreshold);
2277
2278 // Give up if costs are too high. We need to check BBCost and PredBBCost
2279 // individually before checking their sum because getJumpThreadDuplicationCost
2280 // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2281 if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
2282 BBCost + PredBBCost > BBDupThreshold) {
2283 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
2284 << "' - Cost is too high: " << PredBBCost
2285 << " for PredBB, " << BBCost << "for BB\n");
2286 return false;
2287 }
2288
2289 // Now we are ready to duplicate PredBB.
2290 threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB);
2291 return true;
2292 }
2293
2294 void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB,
2295 BasicBlock *PredBB,
2296 BasicBlock *BB,
2297 BasicBlock *SuccBB) {
2298 LLVM_DEBUG(dbgs() << " Threading through '" << PredBB->getName() << "' and '"
2299 << BB->getName() << "'\n");
2300
2301 BranchInst *CondBr = cast<BranchInst>(BB->getTerminator());
2302 BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator());
2303
2304 BasicBlock *NewBB =
2305 BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread",
2306 PredBB->getParent(), PredBB);
2307 NewBB->moveAfter(PredBB);
2308
2309 // Set the block frequency of NewBB.
2310 if (HasProfileData) {
2311 auto NewBBFreq = BFI->getBlockFreq(PredPredBB) *
2312 BPI->getEdgeProbability(PredPredBB, PredBB);
2313 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2314 }
2315
2316 // We are going to have to map operands from the original PredBB block to the
2317 // new copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate
2318 // them to account for entry from PredPredBB.
2319 DenseMap<Instruction *, Value *> ValueMapping =
2320 cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB);
2321
2322 // Copy the edge probabilities from PredBB to NewBB.
2323 if (HasProfileData)
2324 BPI->copyEdgeProbabilities(PredBB, NewBB);
2325
2326 // Update the terminator of PredPredBB to jump to NewBB instead of PredBB.
2327 // This eliminates predecessors from PredPredBB, which requires us to simplify
2328 // any PHI nodes in PredBB.
2329 Instruction *PredPredTerm = PredPredBB->getTerminator();
2330 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i)
2331 if (PredPredTerm->getSuccessor(i) == PredBB) {
2332 PredBB->removePredecessor(PredPredBB, true);
2333 PredPredTerm->setSuccessor(i, NewBB);
2334 }
2335
2336 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB,
2337 ValueMapping);
2338 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB,
2339 ValueMapping);
2340
2341 DTU->applyUpdatesPermissive(
2342 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)},
2343 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)},
2344 {DominatorTree::Insert, PredPredBB, NewBB},
2345 {DominatorTree::Delete, PredPredBB, PredBB}});
2346
2347 updateSSA(PredBB, NewBB, ValueMapping);
2348
2349 // Clean up things like PHI nodes with single operands, dead instructions,
2350 // etc.
2351 SimplifyInstructionsInBlock(NewBB, TLI);
2352 SimplifyInstructionsInBlock(PredBB, TLI);
2353
2354 SmallVector<BasicBlock *, 1> PredsToFactor;
2355 PredsToFactor.push_back(NewBB);
2356 threadEdge(BB, PredsToFactor, SuccBB);
2357 }
2358
2359 /// tryThreadEdge - Thread an edge if it's safe and profitable to do so.
2360 bool JumpThreadingPass::tryThreadEdge(
2361 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
2362 BasicBlock *SuccBB) {
2363 // If threading to the same block as we come from, we would infinite loop.
2364 if (SuccBB == BB) {
2365 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
2366 << "' - would thread to self!\n");
2367 return false;
2368 }
2369
2370 // If threading this would thread across a loop header, don't thread the edge.
2371 // See the comments above findLoopHeaders for justifications and caveats.
2372 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2373 LLVM_DEBUG({
2374 bool BBIsHeader = LoopHeaders.count(BB);
2375 bool SuccIsHeader = LoopHeaders.count(SuccBB);
2376 dbgs() << " Not threading across "
2377 << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName()
2378 << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '")
2379 << SuccBB->getName() << "' - it might create an irreducible loop!\n";
2380 });
2381 return false;
2382 }
2383
2384 unsigned JumpThreadCost = getJumpThreadDuplicationCost(
2385 TTI, BB, BB->getTerminator(), BBDupThreshold);
2386 if (JumpThreadCost > BBDupThreshold) {
2387 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
2388 << "' - Cost is too high: " << JumpThreadCost << "\n");
2389 return false;
2390 }
2391
2392 threadEdge(BB, PredBBs, SuccBB);
2393 return true;
2394 }
2395
2396 /// threadEdge - We have decided that it is safe and profitable to factor the
2397 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
2398 /// across BB. Transform the IR to reflect this change.
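/// Schematically (an illustrative sketch): the predecessors in PredBBs are
/// first merged into a single PredBB; BB's non-terminator instructions are
/// then cloned into a new block BB.thread that ends with an unconditional
/// branch to SuccBB, and PredBB is retargeted to BB.thread. Any other
/// predecessors keep branching to the original BB.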
2399 void JumpThreadingPass::threadEdge(BasicBlock *BB,
2400 const SmallVectorImpl<BasicBlock *> &PredBBs,
2401 BasicBlock *SuccBB) {
2402 assert(SuccBB != BB && "Don't create an infinite loop");
2403
2404 assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) &&
2405 "Don't thread across loop headers");
2406
2407 // And finally, do it! Start by factoring the predecessors if needed.
2408 BasicBlock *PredBB;
2409 if (PredBBs.size() == 1)
2410 PredBB = PredBBs[0];
2411 else {
2412 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
2413 << " common predecessors.\n");
2414 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
2415 }
2416
2417 // And finally, do it!
2418 LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName()
2419 << "' to '" << SuccBB->getName()
2420 << ", across block:\n " << *BB << "\n");
2421
2422 LVI->threadEdge(PredBB, BB, SuccBB);
2423
2424 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
2425 BB->getName()+".thread",
2426 BB->getParent(), BB);
2427 NewBB->moveAfter(PredBB);
2428
2429 // Set the block frequency of NewBB.
2430 if (HasProfileData) {
2431 auto NewBBFreq =
2432 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
2433 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2434 }
2435
2436 // Copy all the instructions from BB to NewBB except the terminator.
2437 DenseMap<Instruction *, Value *> ValueMapping =
2438 cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB);
2439
2440 // We didn't copy the terminator from BB over to NewBB, because there is now
2441 // an unconditional jump to SuccBB. Insert the unconditional jump.
2442 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
2443 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());
2444
2445 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
2446 // PHI nodes for NewBB now.
2447 addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);
2448
2449 // Update the terminator of PredBB to jump to NewBB instead of BB. This
2450 // eliminates predecessors from BB, which requires us to simplify any PHI
2451 // nodes in BB.
2452 Instruction *PredTerm = PredBB->getTerminator();
2453 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
2454 if (PredTerm->getSuccessor(i) == BB) {
2455 BB->removePredecessor(PredBB, true);
2456 PredTerm->setSuccessor(i, NewBB);
2457 }
2458
2459 // Enqueue required DT updates.
2460 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB},
2461 {DominatorTree::Insert, PredBB, NewBB},
2462 {DominatorTree::Delete, PredBB, BB}});
2463
2464 updateSSA(BB, NewBB, ValueMapping);
2465
2466 // At this point, the IR is fully up to date and consistent. Do a quick scan
2467 // over the new instructions and zap any that are constants or dead. This
2468 // frequently happens because of phi translation.
2469 SimplifyInstructionsInBlock(NewBB, TLI);
2470
2471 // Update the edge weight from BB to SuccBB, which should be less than before.
2472 updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);
2473
2474 // Threaded an edge!
2475 ++NumThreads;
2476 }
2477
2478 /// Create a new basic block that will be the predecessor of BB and successor of
2479 /// all blocks in Preds. When profile data is available, update the frequency of
2480 /// this new block.
2481 BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB,
2482 ArrayRef<BasicBlock *> Preds,
2483 const char *Suffix) {
2484 SmallVector<BasicBlock *, 2> NewBBs;
2485
2486 // Collect the frequencies of all predecessors of BB, which will be used to
2487 // update the edge weight of the result of splitting predecessors.
2488 DenseMap<BasicBlock *, BlockFrequency> FreqMap;
2489 if (HasProfileData)
2490 for (auto *Pred : Preds)
2491 FreqMap.insert(std::make_pair(
2492 Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));
2493
2494 // In the case when BB is a LandingPad block we create 2 new predecessors
2495 // instead of just one.
2496 if (BB->isLandingPad()) {
2497 std::string NewName = std::string(Suffix) + ".split-lp";
2498 SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs);
2499 } else {
2500 NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix));
2501 }
2502
2503 std::vector<DominatorTree::UpdateType> Updates;
2504 Updates.reserve((2 * Preds.size()) + NewBBs.size());
2505 for (auto *NewBB : NewBBs) {
2506 BlockFrequency NewBBFreq(0);
2507 Updates.push_back({DominatorTree::Insert, NewBB, BB});
2508 for (auto *Pred : predecessors(NewBB)) {
2509 Updates.push_back({DominatorTree::Delete, Pred, BB});
2510 Updates.push_back({DominatorTree::Insert, Pred, NewBB});
2511 if (HasProfileData) // Update frequencies between Pred -> NewBB.
2512 NewBBFreq += FreqMap.lookup(Pred);
2513 }
2514 if (HasProfileData) // Apply the summed frequency to NewBB.
2515 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2516 }
2517
2518 DTU->applyUpdatesPermissive(Updates);
2519 return NewBBs[0];
2520 }
2521
2522 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
2523 const Instruction *TI = BB->getTerminator();
2524 assert(TI->getNumSuccessors() > 1 && "not a split");
2525 return hasValidBranchWeightMD(*TI);
2526 }
2527
2528 /// Update the block frequency of BB and branch weight and the metadata on the
2529 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 -
2530 /// Freq(PredBB->BB) / Freq(BB->SuccBB).
2531 void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
2532 BasicBlock *BB,
2533 BasicBlock *NewBB,
2534 BasicBlock *SuccBB) {
2535 if (!HasProfileData)
2536 return;
2537
2538 assert(BFI && BPI && "BFI & BPI should have been created here");
2539
2540 // As the edge from PredBB to BB is deleted, we have to update the block
2541 // frequency of BB.
2542 auto BBOrigFreq = BFI->getBlockFreq(BB);
2543 auto NewBBFreq = BFI->getBlockFreq(NewBB);
2544 auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
2545 auto BBNewFreq = BBOrigFreq - NewBBFreq;
2546 BFI->setBlockFreq(BB, BBNewFreq.getFrequency());
2547
2548 // Collect updated outgoing edges' frequencies from BB and use them to update
2549 // edge probabilities.
2550 SmallVector<uint64_t, 4> BBSuccFreq;
2551 for (BasicBlock *Succ : successors(BB)) {
2552 auto SuccFreq = (Succ == SuccBB)
2553 ? BB2SuccBBFreq - NewBBFreq
2554 : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
2555 BBSuccFreq.push_back(SuccFreq.getFrequency());
2556 }
2557
2558 uint64_t MaxBBSuccFreq =
2559 *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());
2560
2561 SmallVector<BranchProbability, 4> BBSuccProbs;
2562 if (MaxBBSuccFreq == 0)
2563 BBSuccProbs.assign(BBSuccFreq.size(),
2564 {1, static_cast<unsigned>(BBSuccFreq.size())});
2565 else {
2566 for (uint64_t Freq : BBSuccFreq)
2567 BBSuccProbs.push_back(
2568 BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
2569 // Normalize edge probabilities so that they sum up to one.
2570 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
2571 BBSuccProbs.end());
2572 }
2573
2574 // Update edge probabilities in BPI.
2575 BPI->setEdgeProbability(BB, BBSuccProbs);
2576
2577 // Update the profile metadata as well.
2578 //
2579 // Don't do this if the profile of the transformed blocks was statically
2580 // estimated. (This could occur despite the function having an entry
2581 // frequency in completely cold parts of the CFG.)
2582 //
2583 // In this case we don't want to suggest to subsequent passes that the
2584 // calculated weights are fully consistent. Consider this graph:
2585 //
2586 // check_1
2587 // 50% / |
2588 // eq_1 | 50%
2589 // \ |
2590 // check_2
2591 // 50% / |
2592 // eq_2 | 50%
2593 // \ |
2594 // check_3
2595 // 50% / |
2596 // eq_3 | 50%
2597 // \ |
2598 //
2599 // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
2600 // the overall probabilities are inconsistent; the total probability that the
2601 // value is either 1, 2 or 3 is 150%.
2602 //
2603 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3
2604 // becomes 0%. This is even worse if the edge whose probability becomes 0% is
2605 // the loop exit edge. Then based solely on static estimation we would assume
2606 // the loop was extremely hot.
2607 //
2608 // FIXME: Handle this locally as well so that BPI and BFI stay consistent.
2609 // We shouldn't make edges extremely likely or unlikely based solely on
2610 // static estimation.
2611 if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
2612 SmallVector<uint32_t, 4> Weights;
2613 for (auto Prob : BBSuccProbs)
2614 Weights.push_back(Prob.getNumerator());
2615
2616 auto TI = BB->getTerminator();
2617 TI->setMetadata(
2618 LLVMContext::MD_prof,
2619 MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
2620 }
2621 }
2622
2623 /// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
2624 /// to BB which contains an i1 PHI node and a conditional branch on that PHI.
2625 /// If we can duplicate the contents of BB up into PredBB do so now, this
2626 /// improves the odds that the branch will be on an analyzable instruction like
2627 /// a compare.
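/// For example (an illustrative IR sketch; names are hypothetical):
///
///   pred:  br label %bb
///   bb:    %p = phi i1 [ true, %pred ], [ %x, %other ]
///          br i1 %p, label %t, label %f
///
/// Duplicating bb's body into pred lets the cloned branch see %p == true along
/// that edge, so pred can end up branching directly to %t.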
2628 bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred(
2629 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
2630 assert(!PredBBs.empty() && "Can't handle an empty set");
2631
2632 // If BB is a loop header, then duplicating this block outside the loop would
2633 // cause us to transform this into an irreducible loop, don't do this.
2634 // See the comments above findLoopHeaders for justifications and caveats.
2635 if (LoopHeaders.count(BB)) {
2636 LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName()
2637 << "' into predecessor block '" << PredBBs[0]->getName()
2638 << "' - it might create an irreducible loop!\n");
2639 return false;
2640 }
2641
2642 unsigned DuplicationCost = getJumpThreadDuplicationCost(
2643 TTI, BB, BB->getTerminator(), BBDupThreshold);
2644 if (DuplicationCost > BBDupThreshold) {
2645 LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName()
2646 << "' - Cost is too high: " << DuplicationCost << "\n");
2647 return false;
2648 }
2649
2650 // And finally, do it! Start by factoring the predecessors if needed.
2651 std::vector<DominatorTree::UpdateType> Updates;
2652 BasicBlock *PredBB;
2653 if (PredBBs.size() == 1)
2654 PredBB = PredBBs[0];
2655 else {
2656 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size()
2657 << " common predecessors.\n");
2658 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
2659 }
2660 Updates.push_back({DominatorTree::Delete, PredBB, BB});
2661
2662 // Okay, we decided to do this! Clone all the instructions in BB onto the end
2663 // of PredBB.
2664 LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName()
2665 << "' into end of '" << PredBB->getName()
2666 << "' to eliminate branch on phi. Cost: "
2667 << DuplicationCost << " block is:" << *BB << "\n");
2668
2669 // Unless PredBB ends with an unconditional branch, split the edge so that we
2670 // can just clone the bits from BB into the end of the new PredBB.
2671 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
2672
2673 if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
2674 BasicBlock *OldPredBB = PredBB;
2675 PredBB = SplitEdge(OldPredBB, BB);
2676 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB});
2677 Updates.push_back({DominatorTree::Insert, PredBB, BB});
2678 Updates.push_back({DominatorTree::Delete, OldPredBB, BB});
2679 OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
2680 }
2681
2682 // We are going to have to map operands from the original BB block into the
2683 // PredBB block. Evaluate PHI nodes in BB.
2684 DenseMap<Instruction*, Value*> ValueMapping;
2685
2686 BasicBlock::iterator BI = BB->begin();
2687 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
2688 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
2689 // Clone the non-phi instructions of BB into PredBB, keeping track of the
2690 // mapping and using it to remap operands in the cloned instructions.
2691 for (; BI != BB->end(); ++BI) {
2692 Instruction *New = BI->clone();
2693
2694 // Remap operands to patch up intra-block references.
2695 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2696 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
2697 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
2698 if (I != ValueMapping.end())
2699 New->setOperand(i, I->second);
2700 }
2701
2702 // If this instruction can be simplified after the operands are updated,
2703 // just use the simplified value instead. This frequently happens due to
2704 // phi translation.
2705 if (Value *IV = simplifyInstruction(
2706 New,
2707 {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
2708 ValueMapping[&*BI] = IV;
2709 if (!New->mayHaveSideEffects()) {
2710 New->deleteValue();
2711 New = nullptr;
2712 }
2713 } else {
2714 ValueMapping[&*BI] = New;
2715 }
2716 if (New) {
2717 // Otherwise, insert the new instruction into the block.
2718 New->setName(BI->getName());
2719 New->insertInto(PredBB, OldPredBranch->getIterator());
2720 // Update Dominance from simplified New instruction operands.
2721 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2722 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i)))
2723 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB});
2724 }
2725 }
2726
2727 // Check to see if the targets of the branch had PHI nodes. If so, we need to
2728 // add entries to the PHI nodes for branch from PredBB now.
2729 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
2730 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
2731 ValueMapping);
2732 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
2733 ValueMapping);
2734
2735 updateSSA(BB, PredBB, ValueMapping);
2736
2737 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge
2738 // that we nuked.
2739 BB->removePredecessor(PredBB, true);
2740
2741 // Remove the unconditional branch at the end of the PredBB block.
2742 OldPredBranch->eraseFromParent();
2743 if (HasProfileData)
2744 BPI->copyEdgeProbabilities(BB, PredBB);
2745 DTU->applyUpdatesPermissive(Updates);
2746
2747 ++NumDupes;
2748 return true;
2749 }
2750
2751 // Pred is a predecessor of BB with an unconditional branch to BB. SI is
2752 // a Select instruction in Pred. BB has other predecessors and SI is used in
2753 // a PHI node in BB. SI has no other use.
2754 // A new basic block, NewBB, is created and SI is converted to compare and
2755 // conditional branch. SI is erased from parent.
2756 void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB,
2757 SelectInst *SI, PHINode *SIUse,
2758 unsigned Idx) {
2759 // Expand the select.
2760 //
2761 // Pred --
2762 // | v
2763 // | NewBB
2764 // | |
2765 // |-----
2766 // v
2767 // BB
2768 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator());
2769 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
2770 BB->getParent(), BB);
2771 // Move the unconditional branch to NewBB.
2772 PredTerm->removeFromParent();
2773 PredTerm->insertInto(NewBB, NewBB->end());
2774 // Create a conditional branch and update PHI nodes.
2775 auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
2776 BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc());
2777 BI->copyMetadata(*SI, {LLVMContext::MD_prof});
2778 SIUse->setIncomingValue(Idx, SI->getFalseValue());
2779 SIUse->addIncoming(SI->getTrueValue(), NewBB);
2780 // Set the block frequency of NewBB.
2781 if (HasProfileData) {
2782 uint64_t TrueWeight, FalseWeight;
2783 if (extractBranchWeights(*SI, TrueWeight, FalseWeight) &&
2784 (TrueWeight + FalseWeight) != 0) {
2785 SmallVector<BranchProbability, 2> BP;
2786 BP.emplace_back(BranchProbability::getBranchProbability(
2787 TrueWeight, TrueWeight + FalseWeight));
2788 BP.emplace_back(BranchProbability::getBranchProbability(
2789 FalseWeight, TrueWeight + FalseWeight));
2790 BPI->setEdgeProbability(Pred, BP);
2791 }
2792
2793 auto NewBBFreq =
2794 BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, NewBB);
2795 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2796 }
2797
2798 // The select is now dead.
2799 SI->eraseFromParent();
2800 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB},
2801 {DominatorTree::Insert, Pred, NewBB}});
2802
2803 // Update any other PHI nodes in BB.
2804 for (BasicBlock::iterator BI = BB->begin();
2805 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
2806 if (Phi != SIUse)
2807 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
2808 }
2809
2810 bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) {
2811 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition());
2812
2813 if (!CondPHI || CondPHI->getParent() != BB)
2814 return false;
2815
2816 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) {
2817 BasicBlock *Pred = CondPHI->getIncomingBlock(I);
2818 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I));
2819
2820 // The second and third conditions could potentially be relaxed. Currently
2821 // they help to simplify the code and allow us to reuse existing code,
2822 // developed for tryToUnfoldSelect(CmpInst *, BasicBlock *).
2823 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse())
2824 continue;
2825
2826 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
2827 if (!PredTerm || !PredTerm->isUnconditional())
2828 continue;
2829
2830 unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I);
2831 return true;
2832 }
2833 return false;
2834 }
2835
2836 /// tryToUnfoldSelect - Look for blocks of the form
2837 /// bb1:
2838 /// %a = select
2839 /// br bb2
2840 ///
2841 /// bb2:
2842 /// %p = phi [%a, %bb1] ...
2843 /// %c = icmp %p
2844 /// br i1 %c
2845 ///
2846 /// And expand the select into a branch structure if one of its arms allows %c
2847 /// to be folded. This later enables threading from bb1 over bb2.
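///
/// For example (hypothetical values, for illustration only): with
///   %a = select i1 %flag, i32 0, i32 %x    ; in bb1
///   %c = icmp eq i32 %p, 0                 ; in bb2
/// the true arm lets LVI fold %c to true on the bb1->bb2 edge while the
/// false arm is unknown, so the select is unfolded and the branch on %c can
/// later be threaded through the new block.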
2848 bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
2849 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
2850 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
2851 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));
2852
2853 if (!CondBr || !CondBr->isConditional() || !CondLHS ||
2854 CondLHS->getParent() != BB)
2855 return false;
2856
2857 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
2858 BasicBlock *Pred = CondLHS->getIncomingBlock(I);
2859 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));
2860
2861 // Check whether one of the incoming values is a select defined in the
2862 // corresponding predecessor.
2863 if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
2864 continue;
2865
2866 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
2867 if (!PredTerm || !PredTerm->isUnconditional())
2868 continue;
2869
2870 // Now check if one of the select values would allow us to constant fold the
2871 // terminator in BB. We don't do the transform if both sides fold the same
2872 // way; those cases will be threaded in any case.
2873 LazyValueInfo::Tristate LHSFolds =
2874 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
2875 CondRHS, Pred, BB, CondCmp);
2876 LazyValueInfo::Tristate RHSFolds =
2877 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
2878 CondRHS, Pred, BB, CondCmp);
2879 if ((LHSFolds != LazyValueInfo::Unknown ||
2880 RHSFolds != LazyValueInfo::Unknown) &&
2881 LHSFolds != RHSFolds) {
2882 unfoldSelectInstr(Pred, BB, SI, CondLHS, I);
2883 return true;
2884 }
2885 }
2886 return false;
2887 }
2888
2889 /// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the
2890 /// same BB in the form
2891 /// bb:
2892 /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
2893 /// %s = select %p, trueval, falseval
2894 ///
2895 /// or
2896 ///
2897 /// bb:
2898 /// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
2899 /// %c = cmp %p, 0
2900 /// %s = select %c, trueval, falseval
2901 ///
2902 /// And expand the select into a branch structure. This later enables
2903 /// jump-threading over bb in this pass.
2904 ///
2905 /// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
2906 /// the select if the associated PHI has at least one constant incoming
2907 /// value. If the unfolded select is not jump-threaded, it will be folded
2908 /// again by later optimizations.
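///
/// As an illustrative sketch (hypothetical block names, not actual output),
/// the first form above is rewritten roughly as:
///   bb:
///     %p = phi [false, %bb1], [true, %bb2], ...
///     br i1 %p, label %bb.then, label %bb.split
///   bb.then:
///     br label %bb.split
///   bb.split:
///     %s = phi [trueval, %bb.then], [falseval, %bb]
/// (the condition is frozen first if it may be undef or poison), after which
/// the branch on %p in bb can be folded along the edges from %bb1, %bb2, ...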
2909 bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) {
2910 // This transform would reduce the quality of msan diagnostics.
2911 // Disable this transform under MemorySanitizer.
2912 if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
2913 return false;
2914
2915 // If threading this would thread across a loop header, don't thread the edge.
2916 // See the comments above findLoopHeaders for justifications and caveats.
2917 if (LoopHeaders.count(BB))
2918 return false;
2919
2920 for (BasicBlock::iterator BI = BB->begin();
2921 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2922 // Look for a Phi having at least one constant incoming value.
2923 if (llvm::all_of(PN->incoming_values(),
2924 [](Value *V) { return !isa<ConstantInt>(V); }))
2925 continue;
2926
2927 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
2928 using namespace PatternMatch;
2929
2930 // Check if SI is in BB and uses V as its condition.
2931 if (SI->getParent() != BB)
2932 return false;
2933 Value *Cond = SI->getCondition();
2934 bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr()));
2935 return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr;
2936 };
2937
2938 SelectInst *SI = nullptr;
2939 for (Use &U : PN->uses()) {
2940 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2941 // Look for an ICmp in BB that compares PN with a constant and is the
2942 // condition of a Select.
2943 if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
2944 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
2945 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
2946 if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
2947 SI = SelectI;
2948 break;
2949 }
2950 } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
2951 // Look for a Select in BB that uses PN as condition.
2952 if (isUnfoldCandidate(SelectI, U.get())) {
2953 SI = SelectI;
2954 break;
2955 }
2956 }
2957 }
2958
2959 if (!SI)
2960 continue;
2961 // Expand the select.
2962 Value *Cond = SI->getCondition();
2963 if (!isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI))
2964 Cond = new FreezeInst(Cond, "cond.fr", SI);
2965 Instruction *Term = SplitBlockAndInsertIfThen(Cond, SI, false);
2966 BasicBlock *SplitBB = SI->getParent();
2967 BasicBlock *NewBB = Term->getParent();
2968 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
2969 NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
2970 NewPN->addIncoming(SI->getFalseValue(), BB);
2971 SI->replaceAllUsesWith(NewPN);
2972 SI->eraseFromParent();
2973 // NewBB and SplitBB are newly created blocks which require insertion.
2974 std::vector<DominatorTree::UpdateType> Updates;
2975 Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
2976 Updates.push_back({DominatorTree::Insert, BB, SplitBB});
2977 Updates.push_back({DominatorTree::Insert, BB, NewBB});
2978 Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
2979 // BB's successors were moved to SplitBB, update DTU accordingly.
2980 for (auto *Succ : successors(SplitBB)) {
2981 Updates.push_back({DominatorTree::Delete, BB, Succ});
2982 Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
2983 }
2984 DTU->applyUpdatesPermissive(Updates);
2985 return true;
2986 }
2987 return false;
2988 }
2989
2990 /// Try to propagate a guard from the current BB into one of its predecessors
2991 /// when another branch of execution implies that the condition of this
2992 /// guard is always true. Currently we only process the simplest case that
2993 /// looks like:
2994 ///
2995 /// Start:
2996 /// %cond = ...
2997 /// br i1 %cond, label %T1, label %F1
2998 /// T1:
2999 /// br label %Merge
3000 /// F1:
3001 /// br label %Merge
3002 /// Merge:
3003 /// %condGuard = ...
3004 /// call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
3005 ///
3006 /// And cond either implies condGuard or !condGuard. In this case all the
3007 /// instructions before the guard can be duplicated in both branches, and the
3008 /// guard is then threaded to one of them.
3009 bool JumpThreadingPass::processGuards(BasicBlock *BB) {
3010 using namespace PatternMatch;
3011
3012 // We only want to deal with two predecessors.
3013 BasicBlock *Pred1, *Pred2;
3014 auto PI = pred_begin(BB), PE = pred_end(BB);
3015 if (PI == PE)
3016 return false;
3017 Pred1 = *PI++;
3018 if (PI == PE)
3019 return false;
3020 Pred2 = *PI++;
3021 if (PI != PE)
3022 return false;
3023 if (Pred1 == Pred2)
3024 return false;
3025
3026 // Try to thread one of the guards of the block.
3027 // TODO: Look deeper than the immediate predecessors?
3028 auto *Parent = Pred1->getSinglePredecessor();
3029 if (!Parent || Parent != Pred2->getSinglePredecessor())
3030 return false;
3031
3032 if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
3033 for (auto &I : *BB)
3034 if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI))
3035 return true;
3036
3037 return false;
3038 }
3039
3040 /// Try to propagate the guard from BB, which is the lower block of a diamond,
3041 /// to one of its branches, in case the diamond's condition implies the
3042 /// guard's condition.
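///
/// Continuing the example above processGuards (an illustrative sketch with
/// hypothetical block and value names): if %cond implies %condGuard, the
/// instructions before the guard and the guard itself are duplicated into a
/// new block on the F1->Merge edge (where the implication is not proved),
/// the unguarded copies go into a new block on the T1->Merge edge, and
/// remaining uses in Merge are rewired through fresh PHI nodes:
///
///   Merge.unguarded:               ; split from the T1->Merge edge
///     %condGuard.u = ...
///     br label %Merge
///   Merge.guarded:                 ; split from the F1->Merge edge
///     %condGuard.g = ...
///     call void(i1, ...) @llvm.experimental.guard( i1 %condGuard.g )[ "deopt"() ]
///     br label %Merge
///   Merge:
///     %condGuard = phi i1 [ %condGuard.u, %Merge.unguarded ],
///                         [ %condGuard.g, %Merge.guarded ]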
3043 bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard,
3044 BranchInst *BI) {
3045 assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
3046 assert(BI->isConditional() && "Unconditional branch has 2 successors?");
3047 Value *GuardCond = Guard->getArgOperand(0);
3048 Value *BranchCond = BI->getCondition();
3049 BasicBlock *TrueDest = BI->getSuccessor(0);
3050 BasicBlock *FalseDest = BI->getSuccessor(1);
3051
3052 auto &DL = BB->getModule()->getDataLayout();
3053 bool TrueDestIsSafe = false;
3054 bool FalseDestIsSafe = false;
3055
3056 // True dest is safe if BranchCond => GuardCond.
3057 auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
3058 if (Impl && *Impl)
3059 TrueDestIsSafe = true;
3060 else {
3061 // False dest is safe if !BranchCond => GuardCond.
3062 Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
3063 if (Impl && *Impl)
3064 FalseDestIsSafe = true;
3065 }
3066
3067 if (!TrueDestIsSafe && !FalseDestIsSafe)
3068 return false;
3069
3070 BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
3071 BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;
3072
3073 ValueToValueMapTy UnguardedMapping, GuardedMapping;
3074 Instruction *AfterGuard = Guard->getNextNode();
3075 unsigned Cost =
3076 getJumpThreadDuplicationCost(TTI, BB, AfterGuard, BBDupThreshold);
3077 if (Cost > BBDupThreshold)
3078 return false;
3079 // Duplicate all instructions before the guard and the guard itself to the
3080 // branch where implication is not proved.
3081 BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween(
3082 BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU);
3083 assert(GuardedBlock && "Could not create the guarded block?");
3084 // Duplicate all instructions before the guard in the unguarded branch.
3085 // Since we have successfully duplicated the guarded block, and this block
3086 // has fewer instructions to copy, we expect the duplication to succeed.
3087 BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
3088 BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU);
3089 assert(UnguardedBlock && "Could not create the unguarded block?");
3090 LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
3091 << GuardedBlock->getName() << "\n");
3092 // Some instructions before the guard may still have uses. For them, we need
3093 // to create PHI nodes merging their copies from the guarded and unguarded
3094 // branches. Instructions that have no uses can simply be removed.
3095 SmallVector<Instruction *, 4> ToRemove;
3096 for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
3097 if (!isa<PHINode>(&*BI))
3098 ToRemove.push_back(&*BI);
3099
3100 Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
3101 assert(InsertionPoint && "Empty block?");
3102 // Substitute with Phis & remove.
3103 for (auto *Inst : reverse(ToRemove)) {
3104 if (!Inst->use_empty()) {
3105 PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
3106 NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
3107 NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
3108 NewPN->insertBefore(InsertionPoint);
3109 Inst->replaceAllUsesWith(NewPN);
3110 }
3111 Inst->eraseFromParent();
3112 }
3113 return true;
3114 }
3115