//===- LoopFlatten.cpp - Loop flattening pass------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass flattens pairs of nested loops into a single loop.
//
// The intention is to optimise loop nests like this, which together access an
// array linearly:
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       f(A[i*M+j]);
// into one loop:
//   for (int i = 0; i < (N*M); ++i)
//     f(A[i]);
//
// It can also flatten loops where the induction variables are not used in the
// loop. This is only worth doing if the induction variables are only used in
// an expression like i*M+j. If they had any other uses, we would have to
// insert a div/mod to reconstruct the original values, so this wouldn't be
// profitable.
//
// We also need to prove that N*M will not overflow.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopFlatten.h"

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-flatten"

STATISTIC(NumFlattened, "Number of loops flattened");

static cl::opt<unsigned> RepeatedInstructionThreshold(
    "loop-flatten-cost-threshold", cl::Hidden, cl::init(2),
    cl::desc("Limit on the cost of instructions that can be repeated due to "
             "loop flattening"));

static cl::opt<bool>
    AssumeNoOverflow("loop-flatten-assume-no-overflow", cl::Hidden,
                     cl::init(false),
                     cl::desc("Assume that the product of the two iteration "
                              "trip counts will never overflow"));

static cl::opt<bool>
    WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true),
            cl::desc("Widen the loop induction variables, if possible, so "
                     "overflow checks won't reject flattening"));

struct FlattenInfo {
  Loop *OuterLoop = nullptr;
  Loop *InnerLoop = nullptr;
  // These PHINodes correspond to loop induction variables, which are expected
  // to start at zero and increment by one on each loop iteration.
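  // For example, in the motivating nest in the file header comment,
  // InnerInductionPHI would be the canonical IV for 'j' and OuterInductionPHI
  // the one for 'i'.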
  PHINode *InnerInductionPHI = nullptr;
  PHINode *OuterInductionPHI = nullptr;
  Value *InnerTripCount = nullptr;
  Value *OuterTripCount = nullptr;
  BinaryOperator *InnerIncrement = nullptr;
  BinaryOperator *OuterIncrement = nullptr;
  BranchInst *InnerBranch = nullptr;
  BranchInst *OuterBranch = nullptr;
  SmallPtrSet<Value *, 4> LinearIVUses;
  SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;

  // Whether this holds the flatten info before or after widening.
  bool Widened = false;

  // Holds the old/narrow induction phis, i.e. the phis before IV widening has
  // been applied. This bookkeeping is used so we can skip some checks on these
  // phi nodes.
  PHINode *NarrowInnerInductionPHI = nullptr;
  PHINode *NarrowOuterInductionPHI = nullptr;

  FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL) {}

  bool isNarrowInductionPhi(PHINode *Phi) {
    // This can't be the narrow phi if we haven't widened the IV first.
    if (!Widened)
      return false;
    return NarrowInnerInductionPHI == Phi || NarrowOuterInductionPHI == Phi;
  }
};

static bool
setLoopComponents(Value *&TC, Value *&TripCount, BinaryOperator *&Increment,
                  SmallPtrSetImpl<Instruction *> &IterationInstructions) {
  TripCount = TC;
  IterationInstructions.insert(Increment);
  LLVM_DEBUG(dbgs() << "Found Increment: "; Increment->dump());
  LLVM_DEBUG(dbgs() << "Found trip count: "; TripCount->dump());
  LLVM_DEBUG(dbgs() << "Successfully found all loop components\n");
  return true;
}

// Finds the induction variable, increment and trip count for a simple loop
// that we can flatten.
static bool findLoopComponents(
    Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
    PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
    BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
  LLVM_DEBUG(dbgs() << "Finding components of loop: " << L->getName() << "\n");

  if (!L->isLoopSimplifyForm()) {
    LLVM_DEBUG(dbgs() << "Loop is not in normal form\n");
    return false;
  }

  // Currently, to simplify the implementation, the loop induction variable
  // must start at zero and increment with a step size of one.
  if (!L->isCanonical(*SE)) {
    LLVM_DEBUG(dbgs() << "Loop is not canonical\n");
    return false;
  }

  // There must be exactly one exiting block, and it must be the same as the
  // latch.
  BasicBlock *Latch = L->getLoopLatch();
  if (L->getExitingBlock() != Latch) {
    LLVM_DEBUG(dbgs() << "Exiting and latch block are different\n");
    return false;
  }

  // Find the induction PHI. If there is no induction PHI, we can't do the
  // transformation. TODO: could other variables trigger this? Do we have to
  // search for the best one?
  InductionPHI = L->getInductionVariable(*SE);
  if (!InductionPHI) {
    LLVM_DEBUG(dbgs() << "Could not find induction PHI\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Found induction PHI: "; InductionPHI->dump());

  bool ContinueOnTrue = L->contains(Latch->getTerminator()->getSuccessor(0));
  auto IsValidPredicate = [&](ICmpInst::Predicate Pred) {
    if (ContinueOnTrue)
      return Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT;
    else
      return Pred == CmpInst::ICMP_EQ;
  };

  // Find Compare and make sure it is valid. getLatchCmpInst checks that the
  // back branch of the latch is conditional.
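  // For instance, a sketch of an accepted latch when the loop continues on
  // the true edge:
  //   %cmp = icmp ult i32 %inc, %tripcount
  //   br i1 %cmp, label %header, label %exit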
  ICmpInst *Compare = L->getLatchCmpInst();
  if (!Compare || !IsValidPredicate(Compare->getUnsignedPredicate()) ||
      Compare->hasNUsesOrMore(2)) {
    LLVM_DEBUG(dbgs() << "Could not find valid comparison\n");
    return false;
  }
  BackBranch = cast<BranchInst>(Latch->getTerminator());
  IterationInstructions.insert(BackBranch);
  LLVM_DEBUG(dbgs() << "Found back branch: "; BackBranch->dump());
  IterationInstructions.insert(Compare);
  LLVM_DEBUG(dbgs() << "Found comparison: "; Compare->dump());

  // Find increment and trip count.
  // There are exactly 2 incoming values to the induction phi; one from the
  // pre-header and one from the latch. The incoming latch value is the
  // increment variable.
  Increment =
      dyn_cast<BinaryOperator>(InductionPHI->getIncomingValueForBlock(Latch));
  if (Increment->hasNUsesOrMore(3)) {
    LLVM_DEBUG(dbgs() << "Could not find valid increment\n");
    return false;
  }
  // The trip count is the RHS of the compare. If this doesn't match the trip
  // count computed by SCEV then this is because the trip count variable
  // has been widened so the types don't match, or because it is a constant
  // and another transformation has changed the compare (e.g. icmp ult %inc,
  // tripcount -> icmp ult %j, tripcount-1), or both.
  Value *RHS = Compare->getOperand(1);
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
    return false;
  }
  // The use of the Extend=false flag on getTripCountFromExitCount was added
  // during a refactoring to preserve existing behavior. However, there's
  // nothing obvious in the surrounding code that handles the overflow case.
  // FIXME: audit code to establish whether there's a latent bug here.
  const SCEV *SCEVTripCount =
      SE->getTripCountFromExitCount(BackedgeTakenCount, false);
  const SCEV *SCEVRHS = SE->getSCEV(RHS);
  if (SCEVRHS == SCEVTripCount)
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
  if (ConstantRHS) {
    const SCEV *BackedgeTCExt = nullptr;
    if (IsWidened) {
      const SCEV *SCEVTripCountExt;
      // Find the extended backedge-taken count and extended trip count using
      // SCEV. One of these should now match the RHS of the compare.
      BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
      SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt, false);
      if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
        LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
        return false;
      }
    }
    // If the RHS of the compare is equal to the backedge-taken count we need
    // to add one to get the trip count.
    if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
      ConstantInt *One = ConstantInt::get(ConstantRHS->getType(), 1);
      Value *NewRHS = ConstantInt::get(
          ConstantRHS->getContext(), ConstantRHS->getValue() + One->getValue());
      return setLoopComponents(NewRHS, TripCount, Increment,
                               IterationInstructions);
    }
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  }
  // If the RHS isn't a constant then check that the reason it doesn't match
  // the SCEV trip count is because the RHS is a ZExt or SExt instruction
  // (and take the trip count to be the RHS).
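  // For example, after IV widening the compare may look like this (a sketch):
  //   %tc.ext = zext i32 %tc to i64
  //   %cmp = icmp ult i64 %inc, %tc.ext
  // in which case %tc.ext is taken as the trip count.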
  if (!IsWidened) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  auto *TripCountInst = dyn_cast<Instruction>(RHS);
  if (!TripCountInst) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
      SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
    LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
    return false;
  }
  return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
}

static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
  // All PHIs in the inner and outer headers must either be:
  // - The induction PHI, which we are going to rewrite as one induction in
  //   the new loop. This is already checked by findLoopComponents.
  // - An outer header PHI with all incoming values from outside the loop.
  //   LoopSimplify guarantees we have a pre-header, so we don't need to
  //   worry about that here.
  // - Pairs of PHIs in the inner and outer headers, which implement a
  //   loop-carried dependency that will still be valid in the new loop. To
  //   be valid, this variable must be modified only in the inner loop.

  // The set of PHI nodes in the outer loop header that we know will still be
  // valid after the transformation. These will not need to be modified (with
  // the exception of the induction variable), but we do need to check that
  // there are no unsafe PHI nodes.
  SmallPtrSet<PHINode *, 4> SafeOuterPHIs;
  SafeOuterPHIs.insert(FI.OuterInductionPHI);

  // Check that all PHI nodes in the inner loop header match one of the valid
  // patterns.
  for (PHINode &InnerPHI : FI.InnerLoop->getHeader()->phis()) {
    // The induction PHIs break these rules, and that's OK because we treat
    // them specially when doing the transformation.
    if (&InnerPHI == FI.InnerInductionPHI)
      continue;
    if (FI.isNarrowInductionPhi(&InnerPHI))
      continue;

    // Each inner loop PHI node must have two incoming values/blocks - one
    // from the pre-header, and one from the latch.
    assert(InnerPHI.getNumIncomingValues() == 2);
    Value *PreHeaderValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopPreheader());
    Value *LatchValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopLatch());

    // The incoming value from the outer loop must be the PHI node in the
    // outer loop header, with no modifications made in the top of the outer
    // loop.
    PHINode *OuterPHI = dyn_cast<PHINode>(PreHeaderValue);
    if (!OuterPHI || OuterPHI->getParent() != FI.OuterLoop->getHeader()) {
      LLVM_DEBUG(dbgs() << "value modified in top of outer loop\n");
      return false;
    }

    // The other incoming value must come from the inner loop, without any
    // modifications in the tail end of the outer loop. We are in LCSSA form,
    // so this will actually be a PHI in the inner loop's exit block, which
    // only uses values from inside the inner loop.
    PHINode *LCSSAPHI = dyn_cast<PHINode>(
        OuterPHI->getIncomingValueForBlock(FI.OuterLoop->getLoopLatch()));
    if (!LCSSAPHI) {
      LLVM_DEBUG(dbgs() << "could not find LCSSA PHI\n");
      return false;
    }

    // The value used by the LCSSA PHI must be the same one that the inner
    // loop's PHI uses.
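    // A sketch of the accepted shape for a loop-carried value such as a
    // running sum (block and value names are illustrative):
    //   outer header: %s.outer = phi [ 0, <preheader> ], [ %s.lcssa, <outer latch> ]
    //   inner header: %s.inner = phi [ %s.outer, <inner preheader> ], [ %s.next, <inner latch> ]
    //   inner exit:   %s.lcssa = phi [ %s.next, <inner latch> ]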
    if (LCSSAPHI->hasConstantValue() != LatchValue) {
      LLVM_DEBUG(
          dbgs() << "LCSSA PHI incoming value does not match latch value\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "PHI pair is safe:\n");
    LLVM_DEBUG(dbgs() << "  Inner: "; InnerPHI.dump());
    LLVM_DEBUG(dbgs() << "  Outer: "; OuterPHI->dump());
    SafeOuterPHIs.insert(OuterPHI);
    FI.InnerPHIsToTransform.insert(&InnerPHI);
  }

  for (PHINode &OuterPHI : FI.OuterLoop->getHeader()->phis()) {
    if (FI.isNarrowInductionPhi(&OuterPHI))
      continue;
    if (!SafeOuterPHIs.count(&OuterPHI)) {
      LLVM_DEBUG(dbgs() << "found unsafe PHI in outer loop: "; OuterPHI.dump());
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "checkPHIs: OK\n");
  return true;
}

static bool
checkOuterLoopInsts(FlattenInfo &FI,
                    SmallPtrSetImpl<Instruction *> &IterationInstructions,
                    const TargetTransformInfo *TTI) {
  // Check for instructions in the outer but not inner loop. If any of these
  // have side-effects then this transformation is not legal, and if there is
  // a significant amount of code here which can't be optimised out then it's
  // not profitable (as these instructions would get executed for each
  // iteration of the inner loop).
  InstructionCost RepeatedInstrCost = 0;
  for (auto *B : FI.OuterLoop->getBlocks()) {
    if (FI.InnerLoop->contains(B))
      continue;

    for (auto &I : *B) {
      if (!isa<PHINode>(&I) && !I.isTerminator() &&
          !isSafeToSpeculativelyExecute(&I)) {
        LLVM_DEBUG(dbgs() << "Cannot flatten because instruction may have "
                             "side effects: ";
                   I.dump());
        return false;
      }
      // The execution count of the outer loop's iteration instructions
      // (increment, compare and branch) will be increased, but the
      // equivalent instructions will be removed from the inner loop, so
      // they make a net difference of zero.
      if (IterationInstructions.count(&I))
        continue;
      // The unconditional branch to the inner loop's header will turn into
      // a fall-through, so adds no cost.
      BranchInst *Br = dyn_cast<BranchInst>(&I);
      if (Br && Br->isUnconditional() &&
          Br->getSuccessor(0) == FI.InnerLoop->getHeader())
        continue;
      // Multiplies of the outer iteration variable and inner iteration
      // count will be optimised out.
      if (match(&I, m_c_Mul(m_Specific(FI.OuterInductionPHI),
                            m_Specific(FI.InnerTripCount))))
        continue;
      InstructionCost Cost =
          TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
      LLVM_DEBUG(dbgs() << "Cost " << Cost << ": "; I.dump());
      RepeatedInstrCost += Cost;
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of instructions that will be repeated: "
                    << RepeatedInstrCost << "\n");
  // Bail out if flattening the loops would cause instructions in the outer
  // loop but not in the inner loop to be executed extra times.
  if (RepeatedInstrCost > RepeatedInstructionThreshold) {
    LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: OK\n");
  return true;
}

static bool checkIVUsers(FlattenInfo &FI) {
  // We require all uses of both induction variables to match this pattern:
  //
  //   (OuterPHI * InnerTripCount) + InnerPHI
  //
  // Any uses of the induction variables not matching that pattern would
  // require a div/mod to reconstruct in the flattened loop, so the
  // transformation wouldn't be profitable.
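  //
  // For example, with A accessed as A[i*M+j], the only accepted IV uses look
  // like this (a sketch; value names are illustrative):
  //   %mul = mul i32 %i, %M
  //   %add = add i32 %mul, %j
  // with %add then feeding the address computation.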

  Value *InnerTripCount = FI.InnerTripCount;
  if (FI.Widened &&
      (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
    InnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);

  // Check that all uses of the inner loop's induction variable match the
  // expected pattern, recording the uses of the outer IV.
  SmallPtrSet<Value *, 4> ValidOuterPHIUses;
  for (User *U : FI.InnerInductionPHI->users()) {
    if (U == FI.InnerIncrement)
      continue;

    // After widening the IVs, a trunc instruction might have been introduced,
    // so look through truncs.
    if (isa<TruncInst>(U)) {
      if (!U->hasOneUse())
        return false;
      U = *U->user_begin();
    }

    // If the use is in the compare (which is also the condition of the inner
    // branch) then the compare has been altered by another transformation,
    // e.g. icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where
    // tripcount is a constant. Ignore this use as the compare gets removed
    // later anyway.
    if (U == FI.InnerBranch->getCondition())
      continue;

    LLVM_DEBUG(dbgs() << "Found use of inner induction variable: "; U->dump());

    Value *MatchedMul = nullptr;
    Value *MatchedItCount = nullptr;
    bool IsAdd = match(U, m_c_Add(m_Specific(FI.InnerInductionPHI),
                                  m_Value(MatchedMul))) &&
                 match(MatchedMul, m_c_Mul(m_Specific(FI.OuterInductionPHI),
                                           m_Value(MatchedItCount)));

    // Matches the same pattern as above, except it also looks for truncs
    // on the phi, which can be the result of widening the induction variables.
    bool IsAddTrunc =
        match(U, m_c_Add(m_Trunc(m_Specific(FI.InnerInductionPHI)),
                         m_Value(MatchedMul))) &&
        match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(FI.OuterInductionPHI)),
                                  m_Value(MatchedItCount)));

    if (!MatchedItCount)
      return false;
    // Look through extends if the IV has been widened.
    if (FI.Widened &&
        (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
      assert(MatchedItCount->getType() == FI.InnerInductionPHI->getType() &&
             "Unexpected type mismatch in types after widening");
      MatchedItCount = isa<SExtInst>(MatchedItCount)
                           ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
                           : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
    }

    if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerTripCount) {
      LLVM_DEBUG(dbgs() << "Use is optimisable\n");
      ValidOuterPHIUses.insert(MatchedMul);
      FI.LinearIVUses.insert(U);
    } else {
      LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
      return false;
    }
  }

  // Check that there are no uses of the outer IV other than the ones found
  // as part of the pattern above.
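  // (The multiplies matched above, and after widening possibly a trunc feeding
  // such a multiply, are the only outer-IV uses accepted besides the
  // increment.)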
  for (User *U : FI.OuterInductionPHI->users()) {
    if (U == FI.OuterIncrement)
      continue;

    auto IsValidOuterPHIUses = [&](User *U) -> bool {
      LLVM_DEBUG(dbgs() << "Found use of outer induction variable: ";
                 U->dump());
      if (!ValidOuterPHIUses.count(U)) {
        LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Use is optimisable\n");
      return true;
    };

    if (auto *V = dyn_cast<TruncInst>(U)) {
      for (auto *K : V->users()) {
        if (!IsValidOuterPHIUses(K))
          return false;
      }
      continue;
    }

    if (!IsValidOuterPHIUses(U))
      return false;
  }

  LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";
             dbgs() << "Found " << FI.LinearIVUses.size()
                    << " value(s) that can be replaced:\n";
             for (Value *V : FI.LinearIVUses) {
               dbgs() << " ";
               V->dump();
             });
  return true;
}

// Return an OverflowResult dependent on whether overflow of the multiplication
// of InnerTripCount and OuterTripCount can be assumed not to happen.
static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
                                    AssumptionCache *AC) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  // For debugging/testing.
  if (AssumeNoOverflow)
    return OverflowResult::NeverOverflows;

  // Check if the multiply could not overflow due to known ranges of the
  // input values.
  OverflowResult OR = computeOverflowForUnsignedMul(
      FI.InnerTripCount, FI.OuterTripCount, DL, AC,
      FI.OuterLoop->getLoopPreheader()->getTerminator(), DT);
  if (OR != OverflowResult::MayOverflow)
    return OR;

  for (Value *V : FI.LinearIVUses) {
    for (Value *U : V->users()) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        for (Value *GEPUser : U->users()) {
          Instruction *GEPUserInst = dyn_cast<Instruction>(GEPUser);
          if (!isa<LoadInst>(GEPUserInst) &&
              !(isa<StoreInst>(GEPUserInst) &&
                GEP == GEPUserInst->getOperand(1)))
            continue;
          if (!isGuaranteedToExecuteForEveryIteration(GEPUserInst,
                                                      FI.InnerLoop))
            continue;
          // The IV is used as the operand of a GEP which dominates the loop
          // latch, and the IV is at least as wide as the address space of the
          // GEP. In this case, the GEP would wrap around the address space
          // before the IV increment wraps, which would be UB.
          if (GEP->isInBounds() &&
              V->getType()->getIntegerBitWidth() >=
                  DL.getPointerTypeSizeInBits(GEP->getType())) {
            LLVM_DEBUG(
                dbgs() << "use of linear IV would be UB if overflow occurred: ";
                GEP->dump());
            return OverflowResult::NeverOverflows;
          }
        }
      }
    }
  }

  return OverflowResult::MayOverflow;
}

static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                               ScalarEvolution *SE, AssumptionCache *AC,
                               const TargetTransformInfo *TTI) {
  SmallPtrSet<Instruction *, 8> IterationInstructions;
  if (!findLoopComponents(FI.InnerLoop, IterationInstructions,
                          FI.InnerInductionPHI, FI.InnerTripCount,
                          FI.InnerIncrement, FI.InnerBranch, SE, FI.Widened))
    return false;
  if (!findLoopComponents(FI.OuterLoop, IterationInstructions,
                          FI.OuterInductionPHI, FI.OuterTripCount,
                          FI.OuterIncrement, FI.OuterBranch, SE, FI.Widened))
    return false;

  // Both of the loop trip count values must be invariant in the outer loop
  // (non-instructions are all inherently invariant).
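  // (Their product is later emitted in the outer loop's preheader as the new
  // trip count, so both values must be available there.)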
  if (!FI.OuterLoop->isLoopInvariant(FI.InnerTripCount)) {
    LLVM_DEBUG(dbgs() << "inner loop trip count not invariant\n");
    return false;
  }
  if (!FI.OuterLoop->isLoopInvariant(FI.OuterTripCount)) {
    LLVM_DEBUG(dbgs() << "outer loop trip count not invariant\n");
    return false;
  }

  if (!checkPHIs(FI, TTI))
    return false;

  // FIXME: it should be possible to handle different types correctly.
  if (FI.InnerInductionPHI->getType() != FI.OuterInductionPHI->getType())
    return false;

  if (!checkOuterLoopInsts(FI, IterationInstructions, TTI))
    return false;

  // Find the values in the loop that can be replaced with the linearized
  // induction variable, and check that there are no other uses of the inner
  // or outer induction variable. If there were, we could still do this
  // transformation, but we'd have to insert a div/mod to calculate the
  // original IVs, so it wouldn't be profitable.
  if (!checkIVUsers(FI))
    return false;

  LLVM_DEBUG(dbgs() << "CanFlattenLoopPair: OK\n");
  return true;
}

static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                              ScalarEvolution *SE, AssumptionCache *AC,
                              const TargetTransformInfo *TTI, LPMUpdater *U) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n");
  {
    using namespace ore;
    OptimizationRemark Remark(DEBUG_TYPE, "Flattened",
                              FI.InnerLoop->getStartLoc(),
                              FI.InnerLoop->getHeader());
    OptimizationRemarkEmitter ORE(F);
    Remark << "Flattened into outer loop";
    ORE.emit(Remark);
  }

  Value *NewTripCount = BinaryOperator::CreateMul(
      FI.InnerTripCount, FI.OuterTripCount, "flatten.tripcount",
      FI.OuterLoop->getLoopPreheader()->getTerminator());
  LLVM_DEBUG(dbgs() << "Created new trip count in preheader: ";
             NewTripCount->dump());

  // Fix up PHI nodes that take values from the inner loop back-edge, which
  // we are about to remove.
  FI.InnerInductionPHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());

  // The old PHIs will be optimised away later, but for now we can't leave
  // them in an invalid state, so we are updating them too.
  for (PHINode *PHI : FI.InnerPHIsToTransform)
    PHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());

  // Modify the trip count of the outer loop to be the product of the two
  // trip counts.
  cast<User>(FI.OuterBranch->getCondition())->setOperand(1, NewTripCount);

  // Replace the inner loop backedge with an unconditional branch to the exit.
  BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
  BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
  InnerExitingBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(InnerExitBlock, InnerExitingBlock);
  DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());

  // Replace all uses of the polynomial calculated from the two induction
  // variables with the new one.
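  // That is, each matched 'i*M + j' value becomes the single canonical IV of
  // the flattened loop, truncated back to the original type if the IVs were
  // widened.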
  IRBuilder<> Builder(FI.OuterInductionPHI->getParent()->getTerminator());
  for (Value *V : FI.LinearIVUses) {
    Value *OuterValue = FI.OuterInductionPHI;
    if (FI.Widened)
      OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
                                       "flatten.trunciv");

    LLVM_DEBUG(dbgs() << "Replacing: "; V->dump(); dbgs() << "with: ";
               OuterValue->dump());
    V->replaceAllUsesWith(OuterValue);
  }

  // Tell LoopInfo, SCEV and the pass manager that the inner loop has been
  // deleted, and invalidate any information they have about the outer loop.
  SE->forgetLoop(FI.OuterLoop);
  SE->forgetLoop(FI.InnerLoop);
  if (U)
    U->markLoopAsDeleted(*FI.InnerLoop, FI.InnerLoop->getName());
  LI->erase(FI.InnerLoop);

  // Increment statistic value.
  NumFlattened++;

  return true;
}

static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                       ScalarEvolution *SE, AssumptionCache *AC,
                       const TargetTransformInfo *TTI) {
  if (!WidenIV) {
    LLVM_DEBUG(dbgs() << "Widening the IVs is disabled\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Try widening the IVs\n");
  Module *M = FI.InnerLoop->getHeader()->getParent()->getParent();
  auto &DL = M->getDataLayout();
  auto *InnerType = FI.InnerInductionPHI->getType();
  auto *OuterType = FI.OuterInductionPHI->getType();
  unsigned MaxLegalSize = DL.getLargestLegalIntTypeSizeInBits();
  auto *MaxLegalType = DL.getLargestLegalIntType(M->getContext());

  // If both induction types are less than the maximum legal integer width,
  // promote both to the widest type available so we know calculating
  // (OuterTripCount * InnerTripCount) as the new trip count is safe.
  if (InnerType != OuterType ||
      InnerType->getScalarSizeInBits() >= MaxLegalSize ||
      MaxLegalType->getScalarSizeInBits() <
          InnerType->getScalarSizeInBits() * 2) {
    LLVM_DEBUG(dbgs() << "Can't widen the IV\n");
    return false;
  }

  SCEVExpander Rewriter(*SE, DL, "loopflatten");
  SmallVector<WeakTrackingVH, 4> DeadInsts;
  unsigned ElimExt = 0;
  unsigned Widened = 0;

  auto CreateWideIV = [&](WideIVInfo WideIV, bool &Deleted) -> bool {
    PHINode *WidePhi = createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts,
                                    ElimExt, Widened, true /* HasGuards */,
                                    true /* UsePostIncrementRanges */);
    if (!WidePhi)
      return false;
    LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump());
    LLVM_DEBUG(dbgs() << "Deleting old phi: "; WideIV.NarrowIV->dump());
    Deleted = RecursivelyDeleteDeadPHINode(WideIV.NarrowIV);
    return true;
  };

  bool Deleted;
  if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false}, Deleted))
    return false;
  // Add the narrow phi to the list, so that it will be adjusted later when
  // the transformation is performed.
  if (!Deleted)
    FI.InnerPHIsToTransform.insert(FI.InnerInductionPHI);

  if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false}, Deleted))
    return false;

  assert(Widened && "Widened IV expected");
  FI.Widened = true;

  // Save the old/narrow induction phis, which we need to ignore in checkPHIs.
  FI.NarrowInnerInductionPHI = FI.InnerInductionPHI;
  FI.NarrowOuterInductionPHI = FI.OuterInductionPHI;

  // After widening, rediscover all the loop components.
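  // (The trip counts, increments and compares recorded earlier may still refer
  // to the narrow values, so they are found again in terms of the wide IVs.)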
  return CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
}

static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                            ScalarEvolution *SE, AssumptionCache *AC,
                            const TargetTransformInfo *TTI, LPMUpdater *U) {
  LLVM_DEBUG(
      dbgs() << "Loop flattening running on outer loop "
             << FI.OuterLoop->getHeader()->getName() << " and inner loop "
             << FI.InnerLoop->getHeader()->getName() << " in "
             << FI.OuterLoop->getHeader()->getParent()->getName() << "\n");

  if (!CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI))
    return false;

  // Check if we can widen the induction variables to avoid overflow checks.
  bool CanFlatten = CanWidenIV(FI, DT, LI, SE, AC, TTI);

  // It can happen that after widening of the IV, flattening may not be
  // possible/happening, e.g. when it is deemed unprofitable. So bail here if
  // that is the case.
  // TODO: IV widening without performing the actual flattening transformation
  // is not ideal. While this codegen change should not matter much, it is an
  // unnecessary change which is better to avoid. It's unlikely this happens
  // often, because if it's unprofitable after widening, it should be
  // unprofitable before widening as checked in the first round of checks. But
  // 'RepeatedInstructionThreshold' is set to only 2, which can probably be
  // relaxed. Because this is making a code change (the IV widening, but not
  // the flattening), we return true here.
  if (FI.Widened && !CanFlatten)
    return true;

  // If we have widened and can perform the transformation, do that here.
  if (CanFlatten)
    return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);

  // Otherwise, if we haven't widened the IV, check if the new iteration
  // variable might overflow. In this case, we need to version the loop, and
  // select the original version at runtime if the iteration space is too
  // large.
  // TODO: We currently don't version the loop.
  OverflowResult OR = checkOverflow(FI, DT, AC);
  if (OR == OverflowResult::AlwaysOverflowsHigh ||
      OR == OverflowResult::AlwaysOverflowsLow) {
    LLVM_DEBUG(dbgs() << "Multiply would always overflow, so not profitable\n");
    return false;
  } else if (OR == OverflowResult::MayOverflow) {
    LLVM_DEBUG(dbgs() << "Multiply might overflow, not flattening\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n");
  return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);
}

bool Flatten(LoopNest &LN, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
             AssumptionCache *AC, TargetTransformInfo *TTI, LPMUpdater *U) {
  bool Changed = false;
  for (Loop *InnerLoop : LN.getLoops()) {
    auto *OuterLoop = InnerLoop->getParentLoop();
    if (!OuterLoop)
      continue;
    FlattenInfo FI(OuterLoop, InnerLoop);
    Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI, U);
  }
  return Changed;
}

PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
                                       LoopStandardAnalysisResults &AR,
                                       LPMUpdater &U) {

  bool Changed = false;

  // The loop flattening pass requires loops to be in simplified form, and also
  // needs LCSSA. Running this pass will simplify all loops that contain inner
  // loops, regardless of whether anything ends up being flattened.
  Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U);

  if (!Changed)
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

namespace {
class LoopFlattenLegacyPass : public FunctionPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopFlattenLegacyPass() : FunctionPass(ID) {
    initializeLoopFlattenLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  // Possibly flatten loop L into its child.
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    getLoopAnalysisUsage(AU);
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addPreserved<AssumptionCacheTracker>();
  }
};
} // namespace

char LoopFlattenLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
                    false, false)

FunctionPass *llvm::createLoopFlattenPass() {
  return new LoopFlattenLegacyPass();
}

bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto &TTIP = getAnalysis<TargetTransformInfoWrapperPass>();
  auto *TTI = &TTIP.getTTI(F);
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  bool Changed = false;
  for (Loop *L : *LI) {
    auto LN = LoopNest::getLoopNest(*L, *SE);
    Changed |= Flatten(*LN, DT, LI, SE, AC, TTI, nullptr);
  }
  return Changed;
}