//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
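//
// For illustration, a minimal sketch of both transforms (hand-written IR,
// names illustrative): a load such as
//   loop:
//     %v = load i32, ptr %g    ; %g is loop invariant and never stored to
// is hoisted into the preheader, while a store through a loop-invariant,
// unaliased pointer executed on every iteration is promoted: the value lives
// in a register inside the loop and is stored once in the exit block(s).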
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");
STATISTIC(NumIntAssociationsHoisted,
          "Number of invariant int expressions "
          "reassociated and hoisted out of the loop");
STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
                                    "reassociated and hoisted out of the loop");
/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

cl::opt<unsigned> IntAssociationUpperLimit(
    "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));
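
// As a usage sketch (the flag names are the ones registered above; running
// LICM under the new pass manager requires the loop-mssa adaptor):
//   opt -passes='loop-mssa(licm)' -licm-mssa-optimization-cap=200 in.ll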

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I,
                                      const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 AssumptionCache *AC, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    Function *F = L->getHeader()->getParent();

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag*/ false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}
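
// Note: printPipeline below renders this pass's textual pipeline form, e.g.
// "licm<allowspeculation>" or "licm<no-allowspeculation>", which is also the
// syntax that -passes=... accepts for overriding the speculation option.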
"" : "no-") << "allowspeculation"; 326 OS << '>'; 327 } 328 329 PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM, 330 LoopStandardAnalysisResults &AR, 331 LPMUpdater &) { 332 if (!AR.MSSA) 333 report_fatal_error("LNICM requires MemorySSA (loop-mssa)", 334 /*GenCrashDiag*/false); 335 336 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis 337 // pass. Function analyses need to be preserved across loop transformations 338 // but ORE cannot be preserved (see comment before the pass definition). 339 OptimizationRemarkEmitter ORE(LN.getParent()); 340 341 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap, 342 Opts.AllowSpeculation); 343 344 Loop &OutermostLoop = LN.getOutermostLoop(); 345 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC, 346 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true); 347 348 if (!Changed) 349 return PreservedAnalyses::all(); 350 351 auto PA = getLoopPassPreservedAnalyses(); 352 353 PA.preserve<DominatorTreeAnalysis>(); 354 PA.preserve<LoopAnalysis>(); 355 PA.preserve<MemorySSAAnalysis>(); 356 357 return PA; 358 } 359 360 void LNICMPass::printPipeline( 361 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) { 362 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline( 363 OS, MapClassName2PassName); 364 365 OS << '<'; 366 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation"; 367 OS << '>'; 368 } 369 370 char LegacyLICMPass::ID = 0; 371 INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion", 372 false, false) 373 INITIALIZE_PASS_DEPENDENCY(LoopPass) 374 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 375 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 376 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 377 INITIALIZE_PASS_DEPENDENCY(LazyBFIPass) 378 INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false, 379 false) 380 381 Pass *llvm::createLICMPass() { return new LegacyLICMPass(); } 382 383 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L, 384 MemorySSA &MSSA) 385 : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap, 386 IsSink, L, MSSA) {} 387 388 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags( 389 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink, 390 Loop &L, MemorySSA &MSSA) 391 : LicmMssaOptCap(LicmMssaOptCap), 392 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap), 393 IsSink(IsSink) { 394 unsigned AccessCapCount = 0; 395 for (auto *BB : L.getBlocks()) 396 if (const auto *Accesses = MSSA.getBlockAccesses(BB)) 397 for (const auto &MA : *Accesses) { 398 (void)MA; 399 ++AccessCapCount; 400 if (AccessCapCount > LicmMssaNoAccForPromotionCap) { 401 NoOfMemAccTooLarge = true; 402 return; 403 } 404 } 405 } 406 407 /// Hoist expressions out of the specified loop. Note, alias info for inner 408 /// loop is not preserved so it is not a good idea to run LICM multiple 409 /// times on one loop. 410 bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, 411 DominatorTree *DT, AssumptionCache *AC, 412 TargetLibraryInfo *TLI, 413 TargetTransformInfo *TTI, 414 ScalarEvolution *SE, MemorySSA *MSSA, 415 OptimizationRemarkEmitter *ORE, 416 bool LoopNestMode) { 417 bool Changed = false; 418 419 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form."); 420 421 // If this loop has metadata indicating that LICM is not to be performed then 422 // just exit. 
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines, however
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, *L, *MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        LoopNestMode
            ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
                                    TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
            : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                         MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
                           MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify,
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<BasicBlock::iterator, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (auto [PointerMustAliases, HasReadsOutsideSet] :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
              LicmAllowSpeculation, HasReadsOutsideSet);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions();
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<BasicBlock *, 16> Worklist =
      collectChildrenInLoop(DT, N, CurLoop);

  bool Changed = false;
  for (BasicBlock *BB : reverse(Worklist)) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FoldableInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                    SafetyInfo, TTI, FoldableInLoop,
                                    LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FoldableInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
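//
// For illustration, a sketch (block names illustrative): given an in-loop
// diamond controlled by a loop-invariant condition,
//
//   header: br i1 %inv.cond, label %bb1, label %bb2
//   bb1/bb2: ... br label %tail
//
// the shape is replicated before the loop as bb1.licm/bb2.licm converging in
// a block that becomes the new preheader, so that instructions from bb1/bb2,
// and phis in tail, can be hoisted into the corresponding replicated blocks.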
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater &MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater &MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              Preheader->getTerminator(), AC, AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to move floating point division out of the loop by
      // converting it to a reciprocal multiplication.
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(I.getIterator());
        ReciprocalDivisor->setDebugLoc(I.getDebugLoc());

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(I.getIterator());
        Product->setDebugLoc(I.getDebugLoc());
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }
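
      // For illustration, a minimal sketch of the rewrite above: with 'arcp'
      // fast-math and a loop-invariant divisor %inv,
      //   %q = fdiv arcp double %x, %inv
      // becomes a hoisted reciprocal plus an in-loop multiply:
      //   preheader: %recip = fdiv arcp double 1.000000e+00, %inv
      //   loop:      %q = fmul arcp double %x, %recip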

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Try to reassociate instructions so that part of the computation can be
      // done outside the loop.
      if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
                              SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getPointerOperand();
  const DataLayout &DL = LI->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in the future, the
  // intrinsic itself is defined to have a size of -1 for variable sized
  // objects. This makes it impossible to verify if the intrinsic envelops our
  // region of interest. For example, both <vscale x 32 x i8> and
  // <vscale x 16 x i8> types would have a -1 parameter, but the former is
  // clearly double the size of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}
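
// For illustration, a minimal sketch of the pattern recognized above: a
// dominating invariant.start of sufficient size whose result is unused:
//   entry:
//     call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
//     br label %loop
//   loop:
//     %v = load i32, ptr %p   ; 32 bits <= 32 bits covered, so invariant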

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}

/// Return true if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
  for (auto *BB : L->getBlocks())
    if (MSSAU.getMemorySSA()->getBlockDefs(BB))
      return false;
  return true;
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {
  // See declaration of SetLicmMssaOptCap for usage details.
  if (Flags.tooManyClobberingCalls())
    return MA->getDefiningAccess();

  MemoryAccess *Source =
      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, BAA);
  Flags.incrementClobberingCalls();
  return Source;
}

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, MemorySSAUpdater &MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags &Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU.getMemorySSA();
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));

    bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);

    bool Invalidated = pointerInvalidatedByLoop(
        MSSA, MU, CurLoop, I, Flags, InvariantGroup);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute has been used on operations that involve
    // inter-thread communication, whose results are implicitly affected by the
    // enclosing control flow. It is not safe to hoist or sink such operations
    // across control flow.
    if (CI->isConvergent())
      return false;

    // FIXME: Current LLVM IR semantics don't work well with coroutines and
    // thread local globals. We currently treat getting the address of a thread
    // local global as not accessing memory, even though it may not be a
    // constant throughout a function with coroutines. Remove this check after
    // we better model semantics of thread local globals.
    if (CI->getFunction()->isPresplitCoroutine())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    MemoryEffects Behavior = AA->getMemoryEffects(CI);

    if (Behavior.doesNotAccessMemory())
      return true;
    if (Behavior.onlyReadsMemory()) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (Behavior.onlyAccessesArgPointees()) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy() &&
              pointerInvalidatedByLoop(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  Flags, /*InvariantGroup=*/false))
            return false;
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load/store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
      return true;
    // If there are more accesses than the Promotion cap, then give up as we're
    // not walking a list that long.
    if (Flags.tooManyMemoryAccesses())
      return false;

    auto *SIMD = MSSA->getMemoryAccess(SI);
    BatchAAResults BAA(*AA);
    auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, SIMD);
    // Make sure there are no clobbers inside the loop.
    if (!MSSA->isLiveOnEntryDef(Source) &&
        CurLoop->contains(Source->getBlock()))
      return false;

    // If there are interfering Uses (i.e. their defining access is in the
    // loop), or ordered loads (stored as Defs!), don't move this store.
    // Could do better here, but this is conservatively correct.
    // TODO: Cache set of Uses on the first walk in runOnLoop, update when
    // moving accesses. Can also extend to dominating uses.
    for (auto *BB : CurLoop->getBlocks())
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses)
          if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
            auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
                                                 const_cast<MemoryUse *>(MU));
            if (!MSSA->isLiveOnEntryDef(MD) &&
                CurLoop->contains(MD->getBlock()))
              return false;
            // Disable hoisting past potentially interfering loads. Optimized
            // Uses may point to an access outside the loop, as getClobbering
            // checks the previous iteration when walking the backedge.
            // FIXME: More precise: no Uses that alias SI.
            if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
              return false;
          } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
            if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
              (void)LI; // Silence warning.
              assert(!LI->isUnordered() && "Expected unordered load");
              return false;
            }
            // Any call, while it may not be clobbering SI, it may be a use.
            if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
              // Check if the call may read from the memory location written
              // to by SI. Check CI's attributes and arguments; the number of
              // such checks performed is limited above by NoOfMemAccTooLarge.
              ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
              if (isModOrRefSet(MRI))
                return false;
            }
          }
      }
    return true;
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing, it's up to the caller
  // to check fault safety
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}
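
// For illustration, a minimal sketch: the LCSSA phi
//   exit:
//     %sum.lcssa = phi i32 [ %sum, %loop ]
// is trivially replaceable by %sum, since every incoming value is %sum.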
1385 static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop, 1386 const LoopSafetyInfo *SafetyInfo, 1387 TargetTransformInfo *TTI, 1388 bool &FoldableInLoop, bool LoopNestMode) { 1389 const auto &BlockColors = SafetyInfo->getBlockColors(); 1390 bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI); 1391 for (const User *U : I.users()) { 1392 const Instruction *UI = cast<Instruction>(U); 1393 if (const PHINode *PN = dyn_cast<PHINode>(UI)) { 1394 const BasicBlock *BB = PN->getParent(); 1395 // We cannot sink uses in catchswitches. 1396 if (isa<CatchSwitchInst>(BB->getTerminator())) 1397 return false; 1398 1399 // We need to sink a callsite to a unique funclet. Avoid sinking if the 1400 // phi use is too muddled. 1401 if (isa<CallInst>(I)) 1402 if (!BlockColors.empty() && 1403 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1) 1404 return false; 1405 1406 if (LoopNestMode) { 1407 while (isa<PHINode>(UI) && UI->hasOneUser() && 1408 UI->getNumOperands() == 1) { 1409 if (!CurLoop->contains(UI)) 1410 break; 1411 UI = cast<Instruction>(UI->user_back()); 1412 } 1413 } 1414 } 1415 1416 if (CurLoop->contains(UI)) { 1417 if (IsFoldable) { 1418 FoldableInLoop = true; 1419 continue; 1420 } 1421 return false; 1422 } 1423 } 1424 return true; 1425 } 1426 1427 static Instruction *cloneInstructionInExitBlock( 1428 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, 1429 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) { 1430 Instruction *New; 1431 if (auto *CI = dyn_cast<CallInst>(&I)) { 1432 const auto &BlockColors = SafetyInfo->getBlockColors(); 1433 1434 // Sinking call-sites need to be handled differently from other 1435 // instructions. The cloned call-site needs a funclet bundle operand 1436 // appropriate for its location in the CFG. 1437 SmallVector<OperandBundleDef, 1> OpBundles; 1438 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles(); 1439 BundleIdx != BundleEnd; ++BundleIdx) { 1440 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx); 1441 if (Bundle.getTagID() == LLVMContext::OB_funclet) 1442 continue; 1443 1444 OpBundles.emplace_back(Bundle); 1445 } 1446 1447 if (!BlockColors.empty()) { 1448 const ColorVector &CV = BlockColors.find(&ExitBlock)->second; 1449 assert(CV.size() == 1 && "non-unique color for exit block!"); 1450 BasicBlock *BBColor = CV.front(); 1451 BasicBlock::iterator EHPad = BBColor->getFirstNonPHIIt(); 1452 if (EHPad->isEHPad()) 1453 OpBundles.emplace_back("funclet", &*EHPad); 1454 } 1455 1456 New = CallInst::Create(CI, OpBundles); 1457 New->copyMetadata(*CI); 1458 } else { 1459 New = I.clone(); 1460 } 1461 1462 New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt()); 1463 if (!I.getName().empty()) 1464 New->setName(I.getName() + ".le"); 1465 1466 if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) { 1467 // Create a new MemoryAccess and let MemorySSA set its defining access. 1468 // After running some passes, MemorySSA might be outdated, and the 1469 // instruction `I` may have become a non-memory touching instruction. 
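    // (Hence /*CreationMustSucceed=*/false below: if `I` no longer touches
    // memory, createMemoryAccessInBB returns nullptr and no MemorySSA update
    // is needed for the clone.)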
1470     MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
1471         New, nullptr, New->getParent(), MemorySSA::Beginning,
1472         /*CreationMustSucceed=*/false);
1473     if (NewMemAcc) {
1474       if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1475         MSSAU.insertDef(MemDef, /*RenameUses=*/true);
1476       else {
1477         auto *MemUse = cast<MemoryUse>(NewMemAcc);
1478         MSSAU.insertUse(MemUse, /*RenameUses=*/true);
1479       }
1480     }
1481   }
1482
1483   // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1484   // this is particularly cheap because we can rip off the PHI node that we're
1485   // replacing for the number and blocks of the predecessors.
1486   // OPT: If this shows up in a profile, we can instead finish sinking all
1487   // invariant instructions, and then walk their operands to re-establish
1488   // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1489   // sinking bottom-up.
1490   for (Use &Op : New->operands())
1491     if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1492       auto *OInst = cast<Instruction>(Op.get());
1493       PHINode *OpPN =
1494           PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1495                           OInst->getName() + ".lcssa");
1496       OpPN->insertBefore(ExitBlock.begin());
1497       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1498         OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1499       Op = OpPN;
1500     }
1501   return New;
1502 }
1503
1504 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1505                              MemorySSAUpdater &MSSAU) {
1506   MSSAU.removeMemoryAccess(&I);
1507   SafetyInfo.removeInstruction(&I);
1508   I.eraseFromParent();
1509 }
1510
1511 static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
1512                                   ICFLoopSafetyInfo &SafetyInfo,
1513                                   MemorySSAUpdater &MSSAU,
1514                                   ScalarEvolution *SE) {
1515   SafetyInfo.removeInstruction(&I);
1516   SafetyInfo.insertInstructionTo(&I, Dest->getParent());
1517   I.moveBefore(*Dest->getParent(), Dest);
1518   if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1519           MSSAU.getMemorySSA()->getMemoryAccess(&I)))
1520     MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
1521                       MemorySSA::BeforeTerminator);
1522   if (SE)
1523     SE->forgetBlockAndLoopDispositions(&I);
1524 }
1525
1526 static Instruction *sinkThroughTriviallyReplaceablePHI(
1527     PHINode *TPN, Instruction *I, LoopInfo *LI,
1528     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1529     const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1530     MemorySSAUpdater &MSSAU) {
1531   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1532          "Expect only trivially replaceable PHI");
1533   BasicBlock *ExitBlock = TPN->getParent();
1534   Instruction *New;
1535   auto It = SunkCopies.find(ExitBlock);
1536   if (It != SunkCopies.end())
1537     New = It->second;
1538   else
1539     New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1540         *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1541   return New;
1542 }
1543
1544 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1545   BasicBlock *BB = PN->getParent();
1546   if (!BB->canSplitPredecessors())
1547     return false;
1548   // It's not impossible to split EHPad blocks, but if BlockColors already
1549   // exists it requires updating BlockColors for all offspring blocks
1550   // accordingly. By skipping this corner case, we keep updating BlockColors
1551   // after splitting a predecessor fairly simple.
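  // (Indirectbr predecessors, checked below, cannot be split at all.)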
1552   if (!SafetyInfo->getBlockColors().empty() &&
1553       BB->getFirstNonPHIIt()->isEHPad())
1554     return false;
1555   for (BasicBlock *BBPred : predecessors(BB)) {
1556     if (isa<IndirectBrInst>(BBPred->getTerminator()))
1557       return false;
1558   }
1559   return true;
1560 }
1561
1562 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1563                                         LoopInfo *LI, const Loop *CurLoop,
1564                                         LoopSafetyInfo *SafetyInfo,
1565                                         MemorySSAUpdater *MSSAU) {
1566 #ifndef NDEBUG
1567   SmallVector<BasicBlock *, 32> ExitBlocks;
1568   CurLoop->getUniqueExitBlocks(ExitBlocks);
1569   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1570                                              ExitBlocks.end());
1571 #endif
1572   BasicBlock *ExitBB = PN->getParent();
1573   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1574
1575   // Split predecessors of the loop exit so that instructions in the loop are
1576   // exposed to exit blocks through trivially replaceable PHIs, while keeping
1577   // the loop in the canonical form where each predecessor of each exit block
1578   // is contained within the loop. For example, this will convert the loop
1579   // below from
1580   //
1581   // LB1:
1582   //   %v1 =
1583   //   br %LE, %LB2
1584   // LB2:
1585   //   %v2 =
1586   //   br %LE, %LB1
1587   // LE:
1588   //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1589   //
1590   // to
1591   //
1592   // LB1:
1593   //   %v1 =
1594   //   br %LE.split, %LB2
1595   // LB2:
1596   //   %v2 =
1597   //   br %LE.split2, %LB1
1598   // LE.split:
1599   //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
1600   //   br %LE
1601   // LE.split2:
1602   //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
1603   //   br %LE
1604   // LE:
1605   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1606   //
1607   const auto &BlockColors = SafetyInfo->getBlockColors();
1608   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1609   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1610   while (!PredBBs.empty()) {
1611     BasicBlock *PredBB = *PredBBs.begin();
1612     assert(CurLoop->contains(PredBB) &&
1613            "Expect all predecessors to be in the loop");
1614     if (PN->getBasicBlockIndex(PredBB) >= 0) {
1615       BasicBlock *NewPred = SplitBlockPredecessors(
1616           ExitBB, PredBB, ".split.loop.exit", &DTU, LI, MSSAU, true);
1617       // Since we do not allow splitting EH-blocks with BlockColors in
1618       // canSplitPredecessors(), we can simply assign the predecessor's color
1619       // to the new block.
1620       if (!BlockColors.empty())
1621         // Grab a reference to the ColorVector to be inserted before getting
1622         // the reference to the vector we are copying, because inserting the
1623         // new element in BlockColors might cause the map to be reallocated.
1624         SafetyInfo->copyColors(NewPred, PredBB);
1625     }
1626     PredBBs.remove(PredBB);
1627   }
1628 }
1629
1630 /// When an instruction is found to only be used outside of the loop, this
1631 /// function moves it to the exit blocks and patches up SSA form as needed.
1632 /// This method is guaranteed to remove the original instruction from its
1633 /// position, and may either delete it or move it to outside of the loop.
1634 ///
1635 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1636                  const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1637                  MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
1638   bool Changed = false;
1639   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1640
1641   // Iterate over users to be ready for actual sinking. Replace users via
1642   // unreachable blocks with poison and make all user PHIs trivially replaceable.
1643   SmallPtrSet<Instruction *, 8> VisitedUsers;
1644   for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1645     auto *User = cast<Instruction>(*UI);
1646     Use &U = UI.getUse();
1647     ++UI;
1648
1649     if (VisitedUsers.count(User) || CurLoop->contains(User))
1650       continue;
1651
1652     if (!DT->isReachableFromEntry(User->getParent())) {
1653       U = PoisonValue::get(I.getType());
1654       Changed = true;
1655       continue;
1656     }
1657
1658     // The user must be a PHI node.
1659     PHINode *PN = cast<PHINode>(User);
1660
1661     // Surprisingly, instructions can be used outside of loops without any
1662     // exits. This can only happen in PHI nodes if the incoming block is
1663     // unreachable.
1664     BasicBlock *BB = PN->getIncomingBlock(U);
1665     if (!DT->isReachableFromEntry(BB)) {
1666       U = PoisonValue::get(I.getType());
1667       Changed = true;
1668       continue;
1669     }
1670
1671     VisitedUsers.insert(PN);
1672     if (isTriviallyReplaceablePHI(*PN, I))
1673       continue;
1674
1675     if (!canSplitPredecessors(PN, SafetyInfo))
1676       return Changed;
1677
1678     // Split predecessors of the PHI so that we can make users trivially
1679     // replaceable.
1680     splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);
1681
1682     // Should rebuild the iterators, as they may be invalidated by
1683     // splitPredecessorsOfLoopExit().
1684     UI = I.user_begin();
1685     UE = I.user_end();
1686   }
1687
1688   if (VisitedUsers.empty())
1689     return Changed;
1690
1691   ORE->emit([&]() {
1692     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1693            << "sinking " << ore::NV("Inst", &I);
1694   });
1695   if (isa<LoadInst>(I))
1696     ++NumMovedLoads;
1697   else if (isa<CallInst>(I))
1698     ++NumMovedCalls;
1699   ++NumSunk;
1700
1701 #ifndef NDEBUG
1702   SmallVector<BasicBlock *, 32> ExitBlocks;
1703   CurLoop->getUniqueExitBlocks(ExitBlocks);
1704   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1705                                              ExitBlocks.end());
1706 #endif
1707
1708   // Clones of this instruction. Don't create more than one per exit block!
1709   SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1710
1711   // If this instruction is only used outside of the loop, then all users are
1712   // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1713   // the instruction.
1714   // First check if I is worth sinking for all uses; sink only when it is
1715   // worthwhile across all uses.
1716   SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
1717   for (auto *UI : Users) {
1718     auto *User = cast<Instruction>(UI);
1719
1720     if (CurLoop->contains(User))
1721       continue;
1722
1723     PHINode *PN = cast<PHINode>(User);
1724     assert(ExitBlockSet.count(PN->getParent()) &&
1725            "The LCSSA PHI is not in an exit block!");
1726
1727     // The PHI must be trivially replaceable.
1728     Instruction *New = sinkThroughTriviallyReplaceablePHI(
1729         PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1730     // As we sink the instruction out of the BB, drop its debug location.
1731     New->dropLocation();
1732     PN->replaceAllUsesWith(New);
1733     eraseInstruction(*PN, *SafetyInfo, MSSAU);
1734     Changed = true;
1735   }
1736   return Changed;
1737 }
1738
1739 /// When an instruction is found to only use loop invariant operands that
1740 /// are safe to hoist, this function is called to do the dirty work.
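/// For example (illustrative, C-level view):
///   for (...) { t = a + b; use(t); }   // a and b loop-invariant
/// becomes
///   t = a + b; for (...) { use(t); }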
1741 ///
1742 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1743                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1744                   MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
1745                   OptimizationRemarkEmitter *ORE) {
1746   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1747                     << I << "\n");
1748   ORE->emit([&]() {
1749     return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1750                                                          << ore::NV("Inst", &I);
1751   });
1752
1753   // Metadata can be dependent on conditions we are hoisting above.
1754   // Conservatively strip all metadata on the instruction unless we were
1755   // guaranteed to execute I if we entered the loop, in which case the metadata
1756   // is valid in the loop preheader.
1757   // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1758   // then moving to the preheader means we should strip attributes on the call
1759   // that can cause UB since we may be hoisting above conditions that allowed
1760   // inferring those attributes. They may not be valid at the preheader.
1761   if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1762       // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1763       // time in isGuaranteedToExecute if we don't actually have anything to
1764       // drop. It is a compile time optimization, not required for correctness.
1765       !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1766     I.dropUBImplyingAttrsAndMetadata();
1767
1768   if (isa<PHINode>(I))
1769     // Move the new node to the end of the phi list in the destination block.
1770     moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
1771   else
1772     // Move the new node to the destination block, before its terminator.
1773     moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
1774                           MSSAU, SE);
1775
1776   I.updateLocationAfterHoist();
1777
1778   if (isa<LoadInst>(I))
1779     ++NumMovedLoads;
1780   else if (isa<CallInst>(I))
1781     ++NumMovedCalls;
1782   ++NumHoisted;
1783 }
1784
1785 /// Only sink or hoist an instruction if it is not a trapping instruction,
1786 /// or if the instruction is known not to trap when moved to the preheader,
1787 /// or if it is a trapping instruction and is guaranteed to execute.
1788 static bool isSafeToExecuteUnconditionally(
1789     Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1790     const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1791     OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1792     AssumptionCache *AC, bool AllowSpeculation) {
1793   if (AllowSpeculation &&
1794       isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
1795     return true;
1796
1797   bool GuaranteedToExecute =
1798       SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1799
1800   if (!GuaranteedToExecute) {
1801     auto *LI = dyn_cast<LoadInst>(&Inst);
1802     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1803       ORE->emit([&]() {
1804         return OptimizationRemarkMissed(
1805                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1806                << "failed to hoist load with loop-invariant address "
1807                   "because load is conditionally executed";
1808       });
1809   }
1810
1811   return GuaranteedToExecute;
1812 }
1813
1814 namespace {
1815 class LoopPromoter : public LoadAndStorePromoter {
1816   Value *SomePtr; // Designated pointer to store to.
1817   SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1818   SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
1819   SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1820   PredIteratorCache &PredCache;
1821   MemorySSAUpdater &MSSAU;
1822   LoopInfo &LI;
1823   DebugLoc DL;
1824   Align Alignment;
1825   bool UnorderedAtomic;
1826   AAMDNodes AATags;
1827   ICFLoopSafetyInfo &SafetyInfo;
1828   bool CanInsertStoresInExitBlocks;
1829   ArrayRef<const Instruction *> Uses;
1830
1831   // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1832   // (if legal) if doing so would add an out-of-loop use to an instruction
1833   // defined in-loop.
1834   Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1835     if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1836       return V;
1837
1838     Instruction *I = cast<Instruction>(V);
1839     // We need to create an LCSSA PHI node for the incoming value and
1840     // store that.
1841     PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1842                                   I->getName() + ".lcssa");
1843     PN->insertBefore(BB->begin());
1844     for (BasicBlock *Pred : PredCache.get(BB))
1845       PN->addIncoming(I, Pred);
1846     return PN;
1847   }
1848
1849 public:
1850   LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1851                SmallVectorImpl<BasicBlock *> &LEB,
1852                SmallVectorImpl<BasicBlock::iterator> &LIP,
1853                SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1854                MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
1855                Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1856                ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1857       : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
1858         LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
1859         LI(li), DL(std::move(dl)), Alignment(Alignment),
1860         UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1861         SafetyInfo(SafetyInfo),
1862         CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}
1863
1864   void insertStoresInLoopExitBlocks() {
1865     // Insert stores in the loop exit blocks. Each exit block gets a store
1866     // of the live-out value that feeds it. Since we've already told the
1867     // SSA updater about the defs in the loop and the preheader
1868     // definition, it is all set and we can start using it.
1869     DIAssignID *NewID = nullptr;
1870     for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1871       BasicBlock *ExitBlock = LoopExitBlocks[i];
1872       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1873       LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1874       Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1875       BasicBlock::iterator InsertPos = LoopInsertPts[i];
1876       StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1877       if (UnorderedAtomic)
1878         NewSI->setOrdering(AtomicOrdering::Unordered);
1879       NewSI->setAlignment(Alignment);
1880       NewSI->setDebugLoc(DL);
1881       // Attach DIAssignID metadata to the new store, generating it on the
1882       // first loop iteration.
1883       if (i == 0) {
1884         // NewSI will have its DIAssignID set here if there are any stores in
1885         // Uses with a DIAssignID attachment. This merged ID will then be
1886         // attached to the other inserted stores (in the branch below).
1887         NewSI->mergeDIAssignID(Uses);
1888         NewID = cast_or_null<DIAssignID>(
1889             NewSI->getMetadata(LLVMContext::MD_DIAssignID));
1890       } else {
1891         // Attach the DIAssignID (or nullptr) merged from Uses in the branch
1892         // above.
1893 NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID); 1894 } 1895 1896 if (AATags) 1897 NewSI->setAAMetadata(AATags); 1898 1899 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i]; 1900 MemoryAccess *NewMemAcc; 1901 if (!MSSAInsertPoint) { 1902 NewMemAcc = MSSAU.createMemoryAccessInBB( 1903 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning); 1904 } else { 1905 NewMemAcc = 1906 MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint); 1907 } 1908 MSSAInsertPts[i] = NewMemAcc; 1909 MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true); 1910 // FIXME: true for safety, false may still be correct. 1911 } 1912 } 1913 1914 void doExtraRewritesBeforeFinalDeletion() override { 1915 if (CanInsertStoresInExitBlocks) 1916 insertStoresInLoopExitBlocks(); 1917 } 1918 1919 void instructionDeleted(Instruction *I) const override { 1920 SafetyInfo.removeInstruction(I); 1921 MSSAU.removeMemoryAccess(I); 1922 } 1923 1924 bool shouldDelete(Instruction *I) const override { 1925 if (isa<StoreInst>(I)) 1926 return CanInsertStoresInExitBlocks; 1927 return true; 1928 } 1929 }; 1930 1931 bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L, 1932 DominatorTree *DT) { 1933 // We can perform the captured-before check against any instruction in the 1934 // loop header, as the loop header is reachable from any instruction inside 1935 // the loop. 1936 // TODO: ReturnCaptures=true shouldn't be necessary here. 1937 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true, 1938 /* StoreCaptures */ true, 1939 L->getHeader()->getTerminator(), DT); 1940 } 1941 1942 /// Return true if we can prove that a caller cannot inspect the object if an 1943 /// unwind occurs inside the loop. 1944 bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L, 1945 DominatorTree *DT) { 1946 bool RequiresNoCaptureBeforeUnwind; 1947 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind)) 1948 return false; 1949 1950 return !RequiresNoCaptureBeforeUnwind || 1951 isNotCapturedBeforeOrInLoop(Object, L, DT); 1952 } 1953 1954 bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT, 1955 TargetTransformInfo *TTI) { 1956 // The object must be function-local to start with, and then not captured 1957 // before/in the loop. 1958 return (isIdentifiedFunctionLocal(Object) && 1959 isNotCapturedBeforeOrInLoop(Object, L, DT)) || 1960 (TTI->isSingleThreaded() || SingleThread); 1961 } 1962 1963 } // namespace 1964 1965 /// Try to promote memory values to scalars by sinking stores out of the 1966 /// loop and moving loads to before the loop. We do this by looping over 1967 /// the stores in the loop, looking for stores to Must pointers which are 1968 /// loop invariant. 1969 /// 1970 bool llvm::promoteLoopAccessesToScalars( 1971 const SmallSetVector<Value *, 8> &PointerMustAliases, 1972 SmallVectorImpl<BasicBlock *> &ExitBlocks, 1973 SmallVectorImpl<BasicBlock::iterator> &InsertPts, 1974 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC, 1975 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, 1976 const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop, 1977 MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo, 1978 OptimizationRemarkEmitter *ORE, bool AllowSpeculation, 1979 bool HasReadsOutsideSet) { 1980 // Verify inputs. 
1981   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1982          SafetyInfo != nullptr &&
1983          "Unexpected Input to promoteLoopAccessesToScalars");
1984
1985   LLVM_DEBUG({
1986     dbgs() << "Trying to promote set of must-aliased pointers:\n";
1987     for (Value *Ptr : PointerMustAliases)
1988       dbgs() << "  " << *Ptr << "\n";
1989   });
1990   ++NumPromotionCandidates;
1991
1992   Value *SomePtr = *PointerMustAliases.begin();
1993   BasicBlock *Preheader = CurLoop->getLoopPreheader();
1994
1995   // It is not safe to promote a load/store from the loop if the load/store is
1996   // conditional. For example, turning:
1997   //
1998   //    for () { if (c) *P += 1; }
1999   //
2000   // into:
2001   //
2002   //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
2003   //
2004   // is not safe, because *P may only be valid to access if 'c' is true.
2005   //
2006   // The safety property divides into two parts:
2007   // p1) The memory may not be dereferenceable on entry to the loop. In this
2008   //     case, we can't insert the required load in the preheader.
2009   // p2) The memory model does not allow us to insert a store along any dynamic
2010   //     path which did not originally have one.
2011   //
2012   // If at least one store is guaranteed to execute, both properties are
2013   // satisfied, and promotion is legal.
2014   //
2015   // This, however, is not a necessary condition. Even if no store/load is
2016   // guaranteed to execute, we can still establish these properties.
2017   // We can establish (p1) by proving that hoisting the load into the preheader
2018   // is safe (i.e. proving dereferenceability on all paths through the loop). We
2019   // can use any access within the alias set to prove dereferenceability,
2020   // since they're all must alias.
2021   //
2022   // There are two ways to establish (p2):
2023   // a) Prove the location is thread-local. In this case the memory model
2024   //    requirement does not apply, and stores are safe to insert.
2025   // b) Prove a store dominates every exit block. In this case, if an exit
2026   //    block is reached, the original dynamic path would have taken us through
2027   //    the store, so inserting a store into the exit block is safe. Note that this
2028   //    is different from the store being guaranteed to execute. For instance,
2029   //    if an exception is thrown on the first iteration of the loop, the original
2030   //    store is never executed, but the exit blocks are not executed either.
2031
2032   bool DereferenceableInPH = false;
2033   bool StoreIsGuaranteedToExecute = false;
2034   bool LoadIsGuaranteedToExecute = false;
2035   bool FoundLoadToPromote = false;
2036
2037   // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
2038   enum {
2039     StoreSafe,
2040     StoreUnsafe,
2041     StoreSafetyUnknown,
2042   } StoreSafety = StoreSafetyUnknown;
2043
2044   SmallVector<Instruction *, 64> LoopUses;
2045
2046   // We start with an alignment of one and try to find instructions that allow
2047   // us to prove better alignment.
2048   Align Alignment;
2049   // Keep track of which types of access we see.
2050   bool SawUnorderedAtomic = false;
2051   bool SawNotAtomic = false;
2052   AAMDNodes AATags;
2053
2054   const DataLayout &MDL = Preheader->getDataLayout();
2055
2056   // If there are reads outside the promoted set, then promoting stores is
2057   // definitely not safe.
2058   if (HasReadsOutsideSet)
2059     StoreSafety = StoreUnsafe;
2060
2061   if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
2062     // If a loop can throw, we have to insert a store along each unwind edge.
2063     // That said, we can't actually make the unwind edge explicit. Therefore,
2064     // we have to prove that the store is dead along the unwind edge. We do
2065     // this by proving that the caller can't have a reference to the object
2066     // after return and thus can't possibly load from the object.
2067     Value *Object = getUnderlyingObject(SomePtr);
2068     if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2069       StoreSafety = StoreUnsafe;
2070   }
2071
2072   // Check that all accesses to pointers in the alias set use the same type.
2073   // We cannot (yet) promote a memory location that is loaded and stored in
2074   // different sizes. While we are at it, collect alignment and AA info.
2075   Type *AccessTy = nullptr;
2076   for (Value *ASIV : PointerMustAliases) {
2077     for (Use &U : ASIV->uses()) {
2078       // Ignore instructions that are outside the loop.
2079       Instruction *UI = dyn_cast<Instruction>(U.getUser());
2080       if (!UI || !CurLoop->contains(UI))
2081         continue;
2082
2083       // If there is a non-load/store instruction in the loop, we can't promote
2084       // it.
2085       if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2086         if (!Load->isUnordered())
2087           return false;
2088
2089         SawUnorderedAtomic |= Load->isAtomic();
2090         SawNotAtomic |= !Load->isAtomic();
2091         FoundLoadToPromote = true;
2092
2093         Align InstAlignment = Load->getAlign();
2094
2095         if (!LoadIsGuaranteedToExecute)
2096           LoadIsGuaranteedToExecute =
2097               SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2098
2099         // Note that proving a load safe to speculate requires proving
2100         // sufficient alignment at the target location. Proving it guaranteed
2101         // to execute does as well. Thus we can increase our guaranteed
2102         // alignment as well.
2103         if (!DereferenceableInPH || (InstAlignment > Alignment))
2104           if (isSafeToExecuteUnconditionally(
2105                   *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2106                   Preheader->getTerminator(), AC, AllowSpeculation)) {
2107             DereferenceableInPH = true;
2108             Alignment = std::max(Alignment, InstAlignment);
2109           }
2110       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2111         // Stores *of* the pointer are not interesting, only stores *to* the
2112         // pointer.
2113         if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2114           continue;
2115         if (!Store->isUnordered())
2116           return false;
2117
2118         SawUnorderedAtomic |= Store->isAtomic();
2119         SawNotAtomic |= !Store->isAtomic();
2120
2121         // If the store is guaranteed to execute, both properties are satisfied.
2122         // We may want to check if a store is guaranteed to execute even if we
2123         // already know that promotion is safe, since it may have higher
2124         // alignment than any other guaranteed stores, in which case we can
2125         // raise the alignment on the promoted store.
2126         Align InstAlignment = Store->getAlign();
2127         bool GuaranteedToExecute =
2128             SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
2129         StoreIsGuaranteedToExecute |= GuaranteedToExecute;
2130         if (GuaranteedToExecute) {
2131           DereferenceableInPH = true;
2132           if (StoreSafety == StoreSafetyUnknown)
2133             StoreSafety = StoreSafe;
2134           Alignment = std::max(Alignment, InstAlignment);
2135         }
2136
2137         // If a store dominates all exit blocks, it is safe to sink.
2138         // As explained above, if an exit block was executed, a dominating
2139         // store must have been executed at least once, so we are not
2140         // introducing stores on paths that did not have them.
2141         // Note that this only looks at explicit exit blocks. If we ever
2142         // start sinking stores into unwind edges (see above), this will break.
2143         if (StoreSafety == StoreSafetyUnknown &&
2144             llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2145               return DT->dominates(Store->getParent(), Exit);
2146             }))
2147           StoreSafety = StoreSafe;
2148
2149         // If the store is not guaranteed to execute, we may still get
2150         // deref info through it.
2151         if (!DereferenceableInPH) {
2152           DereferenceableInPH = isDereferenceableAndAlignedPointer(
2153               Store->getPointerOperand(), Store->getValueOperand()->getType(),
2154               Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
2155         }
2156       } else
2157         continue; // Not a load or store.
2158
2159       if (!AccessTy)
2160         AccessTy = getLoadStoreType(UI);
2161       else if (AccessTy != getLoadStoreType(UI))
2162         return false;
2163
2164       // Merge the AA tags.
2165       if (LoopUses.empty()) {
2166         // On the first load/store, just take its AA tags.
2167         AATags = UI->getAAMetadata();
2168       } else if (AATags) {
2169         AATags = AATags.merge(UI->getAAMetadata());
2170       }
2171
2172       LoopUses.push_back(UI);
2173     }
2174   }
2175
2176   // If we found both an unordered atomic instruction and a non-atomic memory
2177   // access, bail. We can't blindly promote non-atomic to atomic since we
2178   // might not be able to lower the result. We can't downgrade since that
2179   // would violate the memory model. Also, align 0 is an error for atomics.
2180   if (SawUnorderedAtomic && SawNotAtomic)
2181     return false;
2182
2183   // If we're inserting an atomic load in the preheader, we must be able to
2184   // lower it. We're only guaranteed to be able to lower naturally aligned
2185   // atomics.
2186   if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2187     return false;
2188
2189   // If we couldn't prove we can hoist the load, bail.
2190   if (!DereferenceableInPH) {
2191     LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
2192     return false;
2193   }
2194
2195   // We know we can hoist the load, but don't have a guaranteed store.
2196   // Check whether the location is writable and thread-local. If it is, then we
2197   // can insert stores along paths which originally didn't have them without
2198   // violating the memory model.
2199   if (StoreSafety == StoreSafetyUnknown) {
2200     Value *Object = getUnderlyingObject(SomePtr);
2201     bool ExplicitlyDereferenceableOnly;
2202     if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
2203         (!ExplicitlyDereferenceableOnly ||
2204          isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
2205         isThreadLocalObject(Object, CurLoop, DT, TTI))
2206       StoreSafety = StoreSafe;
2207   }
2208
2209   // If we've still failed to prove we can sink the store, hoist the load
2210   // only, if possible.
2211   if (StoreSafety != StoreSafe && !FoundLoadToPromote)
2212     // If we cannot hoist the load either, give up.
2213     return false;
2214
2215   // Let's do the promotion!
2216   if (StoreSafety == StoreSafe) {
2217     LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2218                       << '\n');
2219     ++NumLoadStorePromoted;
2220   } else {
2221     LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2222                       << '\n');
2223     ++NumLoadPromoted;
2224   }
2225
2226   ORE->emit([&]() {
2227     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2228                               LoopUses[0])
2229            << "Moving accesses to memory location out of the loop";
2230   });
2231
2232   // Look at all the loop uses, and try to merge their locations.
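  // (When the locations differ, DILocation::getMergedLocations conservatively
  // produces a location valid for all of them, typically an artificial line-0
  // location in their nearest common scope.)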
2233   std::vector<DILocation *> LoopUsesLocs;
2234   for (auto *U : LoopUses)
2235     LoopUsesLocs.push_back(U->getDebugLoc().get());
2236   auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2237
2238   // We use the SSAUpdater interface to insert phi nodes as required.
2239   SmallVector<PHINode *, 16> NewPHIs;
2240   SSAUpdater SSA(&NewPHIs);
2241   LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
2242                         MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
2243                         SawUnorderedAtomic,
2244                         StoreIsGuaranteedToExecute ? AATags : AAMDNodes(),
2245                         *SafetyInfo, StoreSafety == StoreSafe);
2246
2247   // Set up the preheader to have a definition of the value. It is the live-out
2248   // value from the preheader that uses in the loop will use.
2249   LoadInst *PreheaderLoad = nullptr;
2250   if (FoundLoadToPromote || !StoreIsGuaranteedToExecute) {
2251     PreheaderLoad =
2252         new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2253                      Preheader->getTerminator()->getIterator());
2254     if (SawUnorderedAtomic)
2255       PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2256     PreheaderLoad->setAlignment(Alignment);
2257     PreheaderLoad->setDebugLoc(DebugLoc());
2258     if (AATags && LoadIsGuaranteedToExecute)
2259       PreheaderLoad->setAAMetadata(AATags);
2260
2261     MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
2262         PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2263     MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2264     MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
2265     SSA.AddAvailableValue(Preheader, PreheaderLoad);
2266   } else {
2267     SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
2268   }
2269
2270   if (VerifyMemorySSA)
2271     MSSAU.getMemorySSA()->verifyMemorySSA();
2272   // Rewrite all the loads in the loop and remember all the definitions from
2273   // stores in the loop.
2274   Promoter.run(LoopUses);
2275
2276   if (VerifyMemorySSA)
2277     MSSAU.getMemorySSA()->verifyMemorySSA();
2278   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2279   if (PreheaderLoad && PreheaderLoad->use_empty())
2280     eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2281
2282   return true;
2283 }
2284
2285 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2286                                 function_ref<void(Instruction *)> Fn) {
2287   for (const BasicBlock *BB : L->blocks())
2288     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2289       for (const auto &Access : *Accesses)
2290         if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2291           Fn(MUD->getMemoryInst());
2292 }
2293
2294 // The bool indicates whether there might be reads outside the set, in which
2295 // case only loads may be promoted.
2296 static SmallVector<PointersAndHasReadsOutsideSet, 0>
2297 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2298   BatchAAResults BatchAA(*AA);
2299   AliasSetTracker AST(BatchAA);
2300
2301   auto IsPotentiallyPromotable = [L](const Instruction *I) {
2302     if (const auto *SI = dyn_cast<StoreInst>(I))
2303       return L->isLoopInvariant(SI->getPointerOperand());
2304     if (const auto *LI = dyn_cast<LoadInst>(I))
2305       return L->isLoopInvariant(LI->getPointerOperand());
2306     return false;
2307   };
2308
2309   // Populate AST with potentially promotable accesses.
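  // (Only loads from and stores to loop-invariant pointers qualify; every
  // other memory access merely invalidates candidate sets in the second
  // walk below.)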
2310 SmallPtrSet<Value *, 16> AttemptingPromotion; 2311 foreachMemoryAccess(MSSA, L, [&](Instruction *I) { 2312 if (IsPotentiallyPromotable(I)) { 2313 AttemptingPromotion.insert(I); 2314 AST.add(I); 2315 } 2316 }); 2317 2318 // We're only interested in must-alias sets that contain a mod. 2319 SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets; 2320 for (AliasSet &AS : AST) 2321 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias()) 2322 Sets.push_back({&AS, false}); 2323 2324 if (Sets.empty()) 2325 return {}; // Nothing to promote... 2326 2327 // Discard any sets for which there is an aliasing non-promotable access. 2328 foreachMemoryAccess(MSSA, L, [&](Instruction *I) { 2329 if (AttemptingPromotion.contains(I)) 2330 return; 2331 2332 llvm::erase_if(Sets, [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) { 2333 ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA); 2334 // Cannot promote if there are writes outside the set. 2335 if (isModSet(MR)) 2336 return true; 2337 if (isRefSet(MR)) { 2338 // Remember reads outside the set. 2339 Pair.setInt(true); 2340 // If this is a mod-only set and there are reads outside the set, 2341 // we will not be able to promote, so bail out early. 2342 return !Pair.getPointer()->isRef(); 2343 } 2344 return false; 2345 }); 2346 }); 2347 2348 SmallVector<std::pair<SmallSetVector<Value *, 8>, bool>, 0> Result; 2349 for (auto [Set, HasReadsOutsideSet] : Sets) { 2350 SmallSetVector<Value *, 8> PointerMustAliases; 2351 for (const auto &MemLoc : *Set) 2352 PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr)); 2353 Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet); 2354 } 2355 2356 return Result; 2357 } 2358 2359 static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU, 2360 Loop *CurLoop, Instruction &I, 2361 SinkAndHoistLICMFlags &Flags, 2362 bool InvariantGroup) { 2363 // For hoisting, use the walker to determine safety 2364 if (!Flags.getIsSink()) { 2365 // If hoisting an invariant group, we only need to check that there 2366 // is no store to the loaded pointer between the start of the loop, 2367 // and the load (since all values must be the same). 2368 2369 // This can be checked in two conditions: 2370 // 1) if the memoryaccess is outside the loop 2371 // 2) the earliest access is at the loop header, 2372 // if the memory loaded is the phi node 2373 2374 BatchAAResults BAA(MSSA->getAA()); 2375 MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU); 2376 return !MSSA->isLiveOnEntryDef(Source) && 2377 CurLoop->contains(Source->getBlock()) && 2378 !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() && isa<MemoryPhi>(Source)); 2379 } 2380 2381 // For sinking, we'd need to check all Defs below this use. The getClobbering 2382 // call will look on the backedge of the loop, but will check aliasing with 2383 // the instructions on the previous iteration. 2384 // For example: 2385 // for (i ... ) 2386 // load a[i] ( Use (LoE) 2387 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop. 2388 // i++; 2389 // The load sees no clobbering inside the loop, as the backedge alias check 2390 // does phi translation, and will check aliasing against store a[i-1]. 2391 // However sinking the load outside the loop, below the store is incorrect. 2392 2393 // For now, only sink if there are no Defs in the loop, and the existing ones 2394 // precede the use and are in the same block. 
2395   // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2396   // needs PostDominatorTreeAnalysis.
2397   // FIXME: More precise: no Defs that alias this Use.
2398   if (Flags.tooManyMemoryAccesses())
2399     return true;
2400   for (auto *BB : CurLoop->getBlocks())
2401     if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
2402       return true;
2403   // When sinking, the source block may not be part of the loop so check it.
2404   if (!CurLoop->contains(&I))
2405     return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);
2406
2407   return false;
2408 }
2409
2410 bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
2411   if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2412     for (const auto &MA : *Accesses)
2413       if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2414         if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2415           return true;
2416   return false;
2417 }
2418
2419 /// Try to simplify things like (A < INV_1 AND A < INV_2) into
2420 /// (A < min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants whose
2421 /// minimum can be computed outside of the loop, and A is not loop-invariant.
2422 static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
2423                         MemorySSAUpdater &MSSAU) {
2424   bool Inverse = false;
2425   using namespace PatternMatch;
2426   Value *Cond1, *Cond2;
2427   if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
2428     Inverse = true;
2429   } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
2430     // Do nothing
2431   } else
2432     return false;
2433
2434   auto MatchICmpAgainstInvariant = [&](Value *C, CmpPredicate &P, Value *&LHS,
2435                                        Value *&RHS) {
2436     if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
2437       return false;
2438     if (!LHS->getType()->isIntegerTy())
2439       return false;
2440     if (!ICmpInst::isRelational(P))
2441       return false;
2442     if (L.isLoopInvariant(LHS)) {
2443       std::swap(LHS, RHS);
2444       P = ICmpInst::getSwappedPredicate(P);
2445     }
2446     if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
2447       return false;
2448     if (Inverse)
2449       P = ICmpInst::getInversePredicate(P);
2450     return true;
2451   };
2452   CmpPredicate P1, P2;
2453   Value *LHS1, *LHS2, *RHS1, *RHS2;
2454   if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
2455       !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
2456     return false;
2457   auto MatchingPred = CmpPredicate::getMatching(P1, P2);
2458   if (!MatchingPred || LHS1 != LHS2)
2459     return false;
2460
2461   // Everything is fine, we can do the transform.
2462   bool UseMin = ICmpInst::isLT(*MatchingPred) || ICmpInst::isLE(*MatchingPred);
2463   assert(
2464       (UseMin || ICmpInst::isGT(*MatchingPred) ||
2465        ICmpInst::isGE(*MatchingPred)) &&
2466       "Relational predicate is either less (or equal) or greater (or equal)!");
2467   Intrinsic::ID id = ICmpInst::isSigned(*MatchingPred)
2468                          ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
2469                          : (UseMin ? Intrinsic::umin : Intrinsic::umax);
2470   auto *Preheader = L.getLoopPreheader();
2471   assert(Preheader && "Loop is not in simplify form?");
2472   IRBuilder<> Builder(Preheader->getTerminator());
2473   // We are about to create a new guaranteed use for RHS2 which might not exist
2474   // before (if it was a non-taken input of logical and/or instruction). If it
2475   // was poison, we need to freeze it. Note that no new uses of LHS and RHS1
2476   // are introduced, so they don't need this.
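  // For example (illustrative IR): in the logical-and form
  //   %cond = select i1 %c1, i1 %c2, i1 false
  // %c2 (and hence RHS2) is only evaluated when %c1 is true, so a poison
  // RHS2 may have been harmless before, while the min/max created in the
  // preheader uses RHS2 unconditionally.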
2477 if (isa<SelectInst>(I)) 2478 RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr"); 2479 Value *NewRHS = Builder.CreateBinaryIntrinsic( 2480 id, RHS1, RHS2, nullptr, 2481 StringRef("invariant.") + 2482 (ICmpInst::isSigned(*MatchingPred) ? "s" : "u") + 2483 (UseMin ? "min" : "max")); 2484 Builder.SetInsertPoint(&I); 2485 ICmpInst::Predicate P = *MatchingPred; 2486 if (Inverse) 2487 P = ICmpInst::getInversePredicate(P); 2488 Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS); 2489 NewCond->takeName(&I); 2490 I.replaceAllUsesWith(NewCond); 2491 eraseInstruction(I, SafetyInfo, MSSAU); 2492 eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU); 2493 eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU); 2494 return true; 2495 } 2496 2497 /// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if 2498 /// this allows hoisting the inner GEP. 2499 static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, 2500 MemorySSAUpdater &MSSAU, AssumptionCache *AC, 2501 DominatorTree *DT) { 2502 auto *GEP = dyn_cast<GetElementPtrInst>(&I); 2503 if (!GEP) 2504 return false; 2505 2506 auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand()); 2507 if (!Src || !Src->hasOneUse() || !L.contains(Src)) 2508 return false; 2509 2510 Value *SrcPtr = Src->getPointerOperand(); 2511 auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); }; 2512 if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant)) 2513 return false; 2514 2515 // This can only happen if !AllowSpeculation, otherwise this would already be 2516 // handled. 2517 // FIXME: Should we respect AllowSpeculation in these reassociation folds? 2518 // The flag exists to prevent metadata dropping, which is not relevant here. 2519 if (all_of(Src->indices(), LoopInvariant)) 2520 return false; 2521 2522 // The swapped GEPs are inbounds if both original GEPs are inbounds 2523 // and the sign of the offsets is the same. For simplicity, only 2524 // handle both offsets being non-negative. 2525 const DataLayout &DL = GEP->getDataLayout(); 2526 auto NonNegative = [&](Value *V) { 2527 return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP)); 2528 }; 2529 bool IsInBounds = Src->isInBounds() && GEP->isInBounds() && 2530 all_of(Src->indices(), NonNegative) && 2531 all_of(GEP->indices(), NonNegative); 2532 2533 BasicBlock *Preheader = L.getLoopPreheader(); 2534 IRBuilder<> Builder(Preheader->getTerminator()); 2535 Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr, 2536 SmallVector<Value *>(GEP->indices()), 2537 "invariant.gep", IsInBounds); 2538 Builder.SetInsertPoint(GEP); 2539 Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc, 2540 SmallVector<Value *>(Src->indices()), "gep", 2541 IsInBounds); 2542 GEP->replaceAllUsesWith(NewGEP); 2543 eraseInstruction(*GEP, SafetyInfo, MSSAU); 2544 eraseInstruction(*Src, SafetyInfo, MSSAU); 2545 return true; 2546 } 2547 2548 /// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here 2549 /// C1 and C2 are loop invariants and LV is a loop-variant. 
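/// For example (illustrative): "(%iv +nsw 4) slt %n" can become
/// "%iv slt (%n - 4)" once we prove "%n - 4" cannot overflow; the
/// subtraction is then computed once in the preheader.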
2550 static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS, 2551 Value *InvariantRHS, ICmpInst &ICmp, Loop &L, 2552 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, 2553 AssumptionCache *AC, DominatorTree *DT) { 2554 assert(!L.isLoopInvariant(VariantLHS) && "Precondition."); 2555 assert(L.isLoopInvariant(InvariantRHS) && "Precondition."); 2556 2557 bool IsSigned = ICmpInst::isSigned(Pred); 2558 2559 // Try to represent VariantLHS as sum of invariant and variant operands. 2560 using namespace PatternMatch; 2561 Value *VariantOp, *InvariantOp; 2562 if (IsSigned && 2563 !match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp)))) 2564 return false; 2565 if (!IsSigned && 2566 !match(VariantLHS, m_NUWAdd(m_Value(VariantOp), m_Value(InvariantOp)))) 2567 return false; 2568 2569 // LHS itself is a loop-variant, try to represent it in the form: 2570 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. 2571 if (L.isLoopInvariant(VariantOp)) 2572 std::swap(VariantOp, InvariantOp); 2573 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp)) 2574 return false; 2575 2576 // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to 2577 // freely move values from left side of inequality to right side (just as in 2578 // normal linear arithmetics). Overflows make things much more complicated, so 2579 // we want to avoid this. 2580 auto &DL = L.getHeader()->getDataLayout(); 2581 SimplifyQuery SQ(DL, DT, AC, &ICmp); 2582 if (IsSigned && computeOverflowForSignedSub(InvariantRHS, InvariantOp, SQ) != 2583 llvm::OverflowResult::NeverOverflows) 2584 return false; 2585 if (!IsSigned && 2586 computeOverflowForUnsignedSub(InvariantRHS, InvariantOp, SQ) != 2587 llvm::OverflowResult::NeverOverflows) 2588 return false; 2589 auto *Preheader = L.getLoopPreheader(); 2590 assert(Preheader && "Loop is not in simplify form?"); 2591 IRBuilder<> Builder(Preheader->getTerminator()); 2592 Value *NewCmpOp = 2593 Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op", 2594 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned); 2595 ICmp.setPredicate(Pred); 2596 ICmp.setOperand(0, VariantOp); 2597 ICmp.setOperand(1, NewCmpOp); 2598 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU); 2599 return true; 2600 } 2601 2602 /// Try to reassociate and hoist the following two patterns: 2603 /// LV - C1 < C2 --> LV < C1 + C2, 2604 /// C1 - LV < C2 --> LV > C1 - C2. 2605 static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS, 2606 Value *InvariantRHS, ICmpInst &ICmp, Loop &L, 2607 ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU, 2608 AssumptionCache *AC, DominatorTree *DT) { 2609 assert(!L.isLoopInvariant(VariantLHS) && "Precondition."); 2610 assert(L.isLoopInvariant(InvariantRHS) && "Precondition."); 2611 2612 bool IsSigned = ICmpInst::isSigned(Pred); 2613 2614 // Try to represent VariantLHS as sum of invariant and variant operands. 2615 using namespace PatternMatch; 2616 Value *VariantOp, *InvariantOp; 2617 if (IsSigned && 2618 !match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp)))) 2619 return false; 2620 if (!IsSigned && 2621 !match(VariantLHS, m_NUWSub(m_Value(VariantOp), m_Value(InvariantOp)))) 2622 return false; 2623 2624 bool VariantSubtracted = false; 2625 // LHS itself is a loop-variant, try to represent it in the form: 2626 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If 2627 // the variant operand goes with minus, we use a slightly different scheme. 
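  // E.g. (illustrative): "C1 - LV < C2" is handled by swapping the operands
  // and the predicate below, yielding "LV > C1 - C2".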
2628 if (L.isLoopInvariant(VariantOp)) { 2629 std::swap(VariantOp, InvariantOp); 2630 VariantSubtracted = true; 2631 Pred = ICmpInst::getSwappedPredicate(Pred); 2632 } 2633 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp)) 2634 return false; 2635 2636 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to 2637 // freely move values from left side of inequality to right side (just as in 2638 // normal linear arithmetics). Overflows make things much more complicated, so 2639 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that 2640 // "C1 - C2" does not overflow. 2641 auto &DL = L.getHeader()->getDataLayout(); 2642 SimplifyQuery SQ(DL, DT, AC, &ICmp); 2643 if (VariantSubtracted && IsSigned) { 2644 // C1 - LV < C2 --> LV > C1 - C2 2645 if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) != 2646 llvm::OverflowResult::NeverOverflows) 2647 return false; 2648 } else if (VariantSubtracted && !IsSigned) { 2649 // C1 - LV < C2 --> LV > C1 - C2 2650 if (computeOverflowForUnsignedSub(InvariantOp, InvariantRHS, SQ) != 2651 llvm::OverflowResult::NeverOverflows) 2652 return false; 2653 } else if (!VariantSubtracted && IsSigned) { 2654 // LV - C1 < C2 --> LV < C1 + C2 2655 if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) != 2656 llvm::OverflowResult::NeverOverflows) 2657 return false; 2658 } else { // !VariantSubtracted && !IsSigned 2659 // LV - C1 < C2 --> LV < C1 + C2 2660 if (computeOverflowForUnsignedAdd(InvariantOp, InvariantRHS, SQ) != 2661 llvm::OverflowResult::NeverOverflows) 2662 return false; 2663 } 2664 auto *Preheader = L.getLoopPreheader(); 2665 assert(Preheader && "Loop is not in simplify form?"); 2666 IRBuilder<> Builder(Preheader->getTerminator()); 2667 Value *NewCmpOp = 2668 VariantSubtracted 2669 ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op", 2670 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned) 2671 : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op", 2672 /*HasNUW*/ !IsSigned, /*HasNSW*/ IsSigned); 2673 ICmp.setPredicate(Pred); 2674 ICmp.setOperand(0, VariantOp); 2675 ICmp.setOperand(1, NewCmpOp); 2676 eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU); 2677 return true; 2678 } 2679 2680 /// Reassociate and hoist add/sub expressions. 2681 static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo, 2682 MemorySSAUpdater &MSSAU, AssumptionCache *AC, 2683 DominatorTree *DT) { 2684 using namespace PatternMatch; 2685 CmpPredicate Pred; 2686 Value *LHS, *RHS; 2687 if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS)))) 2688 return false; 2689 2690 // Put variant operand to LHS position. 2691 if (L.isLoopInvariant(LHS)) { 2692 std::swap(LHS, RHS); 2693 Pred = ICmpInst::getSwappedPredicate(Pred); 2694 } 2695 // We want to delete the initial operation after reassociation, so only do it 2696 // if it has no other uses. 2697 if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse()) 2698 return false; 2699 2700 // TODO: We could go with smarter context, taking common dominator of all I's 2701 // users instead of I itself. 
2702   if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2703     return true;
2704
2705   if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
2706     return true;
2707
2708   return false;
2709 }
2710
2711 static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
2712                              unsigned FPOpcode) {
2713   if (I->getOpcode() == IntOpcode)
2714     return true;
2715   if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
2716       I->hasNoSignedZeros())
2717     return true;
2718   return false;
2719 }
2720
2721 /// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
2722 /// A1, A2, ... and C are loop invariants into expressions like
2723 /// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
2724 /// invariant expressions. This function returns true only if any hoisting has
2725 /// actually occurred.
2726 static bool hoistMulAddAssociation(Instruction &I, Loop &L,
2727                                    ICFLoopSafetyInfo &SafetyInfo,
2728                                    MemorySSAUpdater &MSSAU, AssumptionCache *AC,
2729                                    DominatorTree *DT) {
2730   if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
2731     return false;
2732   Value *VariantOp = I.getOperand(0);
2733   Value *InvariantOp = I.getOperand(1);
2734   if (L.isLoopInvariant(VariantOp))
2735     std::swap(VariantOp, InvariantOp);
2736   if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2737     return false;
2738   Value *Factor = InvariantOp;
2739
2740   // First, we need to make sure we should do the transformation.
2741   SmallVector<Use *> Changes;
2742   SmallVector<BinaryOperator *> Adds;
2743   SmallVector<BinaryOperator *> Worklist;
2744   if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
2745     Worklist.push_back(VariantBinOp);
2746   while (!Worklist.empty()) {
2747     BinaryOperator *BO = Worklist.pop_back_val();
2748     if (!BO->hasOneUse())
2749       return false;
2750     if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
2751         isa<BinaryOperator>(BO->getOperand(0)) &&
2752         isa<BinaryOperator>(BO->getOperand(1))) {
2753       Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
2754       Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
2755       Adds.push_back(BO);
2756       continue;
2757     }
2758     if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
2759         L.isLoopInvariant(BO))
2760       return false;
2761     Use &U0 = BO->getOperandUse(0);
2762     Use &U1 = BO->getOperandUse(1);
2763     if (L.isLoopInvariant(U0))
2764       Changes.push_back(&U0);
2765     else if (L.isLoopInvariant(U1))
2766       Changes.push_back(&U1);
2767     else
2768       return false;
2769     unsigned Limit = I.getType()->isIntOrIntVectorTy()
2770                          ? IntAssociationUpperLimit
2771                          : FPAssociationUpperLimit;
2772     if (Changes.size() > Limit)
2773       return false;
2774   }
2775   if (Changes.empty())
2776     return false;
2777
2778   // Drop the poison flags for any adds we looked through.
2779   if (I.getType()->isIntOrIntVectorTy()) {
2780     for (auto *Add : Adds)
2781       Add->dropPoisonGeneratingFlags();
2782   }
2783
2784   // We know we should do it so let's do the transformation.
2785   auto *Preheader = L.getLoopPreheader();
2786   assert(Preheader && "Loop is not in simplify form?");
2787   IRBuilder<> Builder(Preheader->getTerminator());
2788   for (auto *U : Changes) {
2789     assert(L.isLoopInvariant(U->get()));
2790     auto *Ins = cast<BinaryOperator>(U->getUser());
2791     Value *Mul;
2792     if (I.getType()->isIntOrIntVectorTy()) {
2793       Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
2794       // Drop the poison flags on the original multiply.
/// Reassociate associative binary expressions of the form
///
/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
///
/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
/// loop invariants that we want to hoist, noting that associativity implies
/// commutativity.
static bool hoistBOAssociation(Instruction &I, Loop &L,
                               ICFLoopSafetyInfo &SafetyInfo,
                               MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                               DominatorTree *DT) {
  auto *BO = dyn_cast<BinaryOperator>(&I);
  if (!BO || !BO->isAssociative())
    return false;

  Instruction::BinaryOps Opcode = BO->getOpcode();
  bool LVInRHS = L.isLoopInvariant(BO->getOperand(0));
  auto *BO0 = dyn_cast<BinaryOperator>(BO->getOperand(LVInRHS));
  if (!BO0 || BO0->getOpcode() != Opcode || !BO0->isAssociative() ||
      BO0->hasNUsesOrMore(3))
    return false;

  Value *LV = BO0->getOperand(0);
  Value *C1 = BO0->getOperand(1);
  Value *C2 = BO->getOperand(!LVInRHS);

  assert(BO->isCommutative() && BO0->isCommutative() &&
         "Associativity implies commutativity");
  if (L.isLoopInvariant(LV) && !L.isLoopInvariant(C1))
    std::swap(LV, C1);
  if (L.isLoopInvariant(LV) || !L.isLoopInvariant(C1) || !L.isLoopInvariant(C2))
    return false;

  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");

  IRBuilder<> Builder(Preheader->getTerminator());
  auto *Inv = Builder.CreateBinOp(Opcode, C1, C2, "invariant.op");

  auto *NewBO = BinaryOperator::Create(
      Opcode, LV, Inv, BO->getName() + ".reass", BO->getIterator());

  // Copy NUW for ADDs if both instructions have it.
  if (Opcode == Instruction::Add && BO->hasNoUnsignedWrap() &&
      BO0->hasNoUnsignedWrap()) {
    // If `Inv` was not constant-folded, a new Instruction has been created.
    if (auto *I = dyn_cast<Instruction>(Inv))
      I->setHasNoUnsignedWrap(true);
    NewBO->setHasNoUnsignedWrap(true);
  } else if (Opcode == Instruction::FAdd || Opcode == Instruction::FMul) {
    // Intersect FMF flags for FADD and FMUL.
    FastMathFlags Intersect = BO->getFastMathFlags() & BO0->getFastMathFlags();
    if (auto *I = dyn_cast<Instruction>(Inv))
      I->setFastMathFlags(Intersect);
    NewBO->setFastMathFlags(Intersect);
  }

  BO->replaceAllUsesWith(NewBO);
  eraseInstruction(*BO, SafetyInfo, MSSAU);

  // (LV op C1) might not be erased if it has more uses than the one we just
  // replaced.
  if (BO0->use_empty())
    eraseInstruction(*BO0, SafetyInfo, MSSAU);

  return true;
}
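// As an illustrative sketch of hoistBOAssociation for case 1, "(LV op C1) op
// C2" (hypothetical IR: names are invented, %c1 and %c2 are loop-invariant):
//
//   loop:
//     %t = add nuw i64 %lv, %c1
//     %r = add nuw i64 %t, %c2
//
// becomes
//
//   preheader:
//     %invariant.op = add nuw i64 %c1, %c2
//   loop:
//     %r.reass = add nuw i64 %lv, %invariant.op
//
// where nuw is preserved only because both original adds carried it, and %t is
// erased if %r was its only remaining use.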
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT) {
  // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
  // into (x < min(INV1, INV2)), and hoisting the invariant part of this
  // expression out of the loop.
  if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
    ++NumHoisted;
    ++NumMinMaxHoisted;
    return true;
  }

  // Try to hoist GEPs by reassociation.
  if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumGEPsHoisted;
    return true;
  }

  // Try to hoist add/sub expressions by reassociation.
  if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumAddSubHoisted;
    return true;
  }

  bool IsInt = I.getType()->isIntOrIntVectorTy();
  if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    if (IsInt)
      ++NumIntAssociationsHoisted;
    else
      ++NumFPAssociationsHoisted;
    return true;
  }

  if (hoistBOAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumBOAssociationsHoisted;
    return true;
  }

  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}