//===- LoopFlatten.cpp - Loop flattening pass------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass flattens pairs of nested loops into a single loop.
//
// The intention is to optimise loop nests like this, which together access an
// array linearly:
//
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       f(A[i*M+j]);
//
// into one loop:
//
//   for (int i = 0; i < (N*M); ++i)
//     f(A[i]);
//
// It can also flatten loops where the induction variables are not used in the
// loop. This is only worth doing if the induction variables are only used in
// an expression like i*M+j. If they had any other uses, we would have to
// insert a div/mod to reconstruct the original values, so this wouldn't be
// profitable.
//
// We also need to prove that N*M will not overflow. The preferred solution is
// to widen the IV, which avoids overflow checks, so that is tried first. If
// the IV cannot be widened, then we try to determine that this new tripcount
// expression won't overflow.
//
// Q: Does LoopFlatten use SCEV?
// Short answer: Yes and no.
//
// Long answer:
// For this transformation to be valid, we require all uses of the induction
// variables to be linear expressions of the form i*M+j. The different Loop
// APIs are used to get some loop components like the induction variable,
// compare statement, etc. In addition, we do some pattern matching to find the
// linear expressions and other loop components like the loop increment. The
// latter are examples of expressions that do use the induction variable, but
// are safe to ignore when we check all uses to be of the form i*M+j. We keep
// track of all of this in the bookkeeping struct FlattenInfo.
// We assume the loops to be canonical, i.e. starting at 0 and incrementing by
// 1. This makes the RHS of the compare the loop tripcount (with the right
// predicate). We then use SCEV to sanity check that this tripcount matches
// the tripcount as computed by SCEV.
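//
// As an illustrative (hypothetical) example of that sanity check: for a
// canonical loop whose latch compare is `icmp ult i32 %inc, %N`, %N is taken
// as the tripcount and is expected to agree with SCEV's backedge-taken count
// plus one.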
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopFlatten.h"

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "loop-flatten"

STATISTIC(NumFlattened, "Number of loops flattened");

static cl::opt<unsigned> RepeatedInstructionThreshold(
    "loop-flatten-cost-threshold", cl::Hidden, cl::init(2),
    cl::desc("Limit on the cost of instructions that can be repeated due to "
             "loop flattening"));

static cl::opt<bool>
    AssumeNoOverflow("loop-flatten-assume-no-overflow", cl::Hidden,
                     cl::init(false),
                     cl::desc("Assume that the product of the two iteration "
                              "trip counts will never overflow"));

static cl::opt<bool>
    WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true),
            cl::desc("Widen the loop induction variables, if possible, so "
                     "overflow checks won't reject flattening"));

// We require all uses of both induction variables to match this pattern:
//
//   (OuterPHI * InnerTripCount) + InnerPHI
//
// I.e., it needs to be a linear expression of the induction variables and the
// inner loop trip count. We keep track of all different expressions on which
// checks will be performed in this bookkeeping struct.
//
struct FlattenInfo {
  Loop *OuterLoop = nullptr; // The loop pair to be flattened.
  Loop *InnerLoop = nullptr;

  PHINode *InnerInductionPHI = nullptr; // These PHINodes correspond to loop
  PHINode *OuterInductionPHI = nullptr; // induction variables, which are
                                        // expected to start at zero and
                                        // increment by one on each loop.

  Value *InnerTripCount = nullptr; // The product of these two tripcounts
  Value *OuterTripCount = nullptr; // will be the new flattened loop
                                   // tripcount. Also used to recognise a
                                   // linear expression that will be replaced.

  SmallPtrSet<Value *, 4> LinearIVUses; // Contains the linear expressions
                                        // of the form i*M+j that will be
                                        // replaced.

  BinaryOperator *InnerIncrement = nullptr; // Uses of induction variables in
  BinaryOperator *OuterIncrement = nullptr; // loop control statements that
  BranchInst *InnerBranch = nullptr;        // are safe to ignore.

  BranchInst *OuterBranch = nullptr; // The instruction that needs to be
                                     // updated with new tripcount.

  SmallPtrSet<PHINode *, 4> InnerPHIsToTransform;

  bool Widened = false; // Whether this holds the flatten info before or after
                        // widening.

  PHINode *NarrowInnerInductionPHI = nullptr; // Holds the old/narrow induction
  PHINode *NarrowOuterInductionPHI = nullptr; // phis, i.e. the Phis before IV
                                              // widening has been applied. Used
                                              // to skip checks on phi nodes.

  FlattenInfo(Loop *OL, Loop *IL) : OuterLoop(OL), InnerLoop(IL){};

  bool isNarrowInductionPhi(PHINode *Phi) {
    // This can't be the narrow phi if we haven't widened the IV first.
    if (!Widened)
      return false;
    return NarrowInnerInductionPHI == Phi || NarrowOuterInductionPHI == Phi;
  }
};

static bool
setLoopComponents(Value *&TC, Value *&TripCount, BinaryOperator *&Increment,
                  SmallPtrSetImpl<Instruction *> &IterationInstructions) {
  TripCount = TC;
  IterationInstructions.insert(Increment);
  LLVM_DEBUG(dbgs() << "Found Increment: "; Increment->dump());
  LLVM_DEBUG(dbgs() << "Found trip count: "; TripCount->dump());
  LLVM_DEBUG(dbgs() << "Successfully found all loop components\n");
  return true;
}

// Finds the induction variable, increment and trip count for a simple loop
// that we can flatten.
static bool findLoopComponents(
    Loop *L, SmallPtrSetImpl<Instruction *> &IterationInstructions,
    PHINode *&InductionPHI, Value *&TripCount, BinaryOperator *&Increment,
    BranchInst *&BackBranch, ScalarEvolution *SE, bool IsWidened) {
  LLVM_DEBUG(dbgs() << "Finding components of loop: " << L->getName() << "\n");

  if (!L->isLoopSimplifyForm()) {
    LLVM_DEBUG(dbgs() << "Loop is not in normal form\n");
    return false;
  }

  // Currently, to simplify the implementation, the loop induction variable
  // must start at zero and increment with a step size of one.
  if (!L->isCanonical(*SE)) {
    LLVM_DEBUG(dbgs() << "Loop is not canonical\n");
    return false;
  }

  // There must be exactly one exiting block, and it must be the same as the
  // latch.
  BasicBlock *Latch = L->getLoopLatch();
  if (L->getExitingBlock() != Latch) {
    LLVM_DEBUG(dbgs() << "Exiting and latch block are different\n");
    return false;
  }

  // Find the induction PHI. If there is no induction PHI, we can't do the
  // transformation. TODO: could other variables trigger this? Do we have to
  // search for the best one?
  InductionPHI = L->getInductionVariable(*SE);
  if (!InductionPHI) {
    LLVM_DEBUG(dbgs() << "Could not find induction PHI\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Found induction PHI: "; InductionPHI->dump());

  bool ContinueOnTrue = L->contains(Latch->getTerminator()->getSuccessor(0));
  auto IsValidPredicate = [&](ICmpInst::Predicate Pred) {
    if (ContinueOnTrue)
      return Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT;
    else
      return Pred == CmpInst::ICMP_EQ;
  };

  // Find the compare and make sure it is valid. getLatchCmpInst checks that
  // the back branch of the latch is conditional.
  ICmpInst *Compare = L->getLatchCmpInst();
  if (!Compare || !IsValidPredicate(Compare->getUnsignedPredicate()) ||
      Compare->hasNUsesOrMore(2)) {
    LLVM_DEBUG(dbgs() << "Could not find valid comparison\n");
    return false;
  }
  BackBranch = cast<BranchInst>(Latch->getTerminator());
  IterationInstructions.insert(BackBranch);
  LLVM_DEBUG(dbgs() << "Found back branch: "; BackBranch->dump());
  IterationInstructions.insert(Compare);
  LLVM_DEBUG(dbgs() << "Found comparison: "; Compare->dump());

  // Find increment and trip count.
  // There are exactly 2 incoming values to the induction phi; one from the
  // pre-header and one from the latch. The incoming latch value is the
  // increment variable.
  Increment =
      dyn_cast<BinaryOperator>(InductionPHI->getIncomingValueForBlock(Latch));
  if (Increment->hasNUsesOrMore(3)) {
    LLVM_DEBUG(dbgs() << "Could not find valid increment\n");
    return false;
  }
  // The trip count is the RHS of the compare. If this doesn't match the trip
  // count computed by SCEV then this is because the trip count variable
  // has been widened so the types don't match, or because it is a constant and
  // another transformation has changed the compare (e.g. icmp ult %inc,
  // tripcount -> icmp ult %j, tripcount-1), or both.
  Value *RHS = Compare->getOperand(1);
  const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "Backedge-taken count is not predictable\n");
    return false;
  }
  // The use of the Extend=false flag on getTripCountFromExitCount was added
  // during a refactoring to preserve existing behavior. However, there's
  // nothing obvious in the surrounding code which handles the overflow case.
  // FIXME: audit code to establish whether there's a latent bug here.
  const SCEV *SCEVTripCount =
      SE->getTripCountFromExitCount(BackedgeTakenCount, false);
  const SCEV *SCEVRHS = SE->getSCEV(RHS);
  if (SCEVRHS == SCEVTripCount)
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  ConstantInt *ConstantRHS = dyn_cast<ConstantInt>(RHS);
  if (ConstantRHS) {
    const SCEV *BackedgeTCExt = nullptr;
    if (IsWidened) {
      const SCEV *SCEVTripCountExt;
      // Find the extended backedge taken count and extended trip count using
      // SCEV. One of these should now match the RHS of the compare.
      BackedgeTCExt = SE->getZeroExtendExpr(BackedgeTakenCount, RHS->getType());
      SCEVTripCountExt = SE->getTripCountFromExitCount(BackedgeTCExt, false);
      if (SCEVRHS != BackedgeTCExt && SCEVRHS != SCEVTripCountExt) {
        LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
        return false;
      }
    }
    // If the RHS of the compare is equal to the backedge taken count we need
    // to add one to get the trip count.
    if (SCEVRHS == BackedgeTCExt || SCEVRHS == BackedgeTakenCount) {
      ConstantInt *One = ConstantInt::get(ConstantRHS->getType(), 1);
      Value *NewRHS = ConstantInt::get(
          ConstantRHS->getContext(), ConstantRHS->getValue() + One->getValue());
      return setLoopComponents(NewRHS, TripCount, Increment,
                               IterationInstructions);
    }
    return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
  }
  // If the RHS isn't a constant then check that the reason it doesn't match
  // the SCEV trip count is because the RHS is a ZExt or SExt instruction
  // (and take the trip count to be the RHS).
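  // For example (illustrative, hypothetical IR): after IV widening the compare
  // may be `icmp ult i64 %inc, %tc` with `%tc = zext i32 %N to i64`, while
  // SCEV still expresses the trip count in terms of the narrow %N.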
  if (!IsWidened) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  auto *TripCountInst = dyn_cast<Instruction>(RHS);
  if (!TripCountInst) {
    LLVM_DEBUG(dbgs() << "Could not find valid trip count\n");
    return false;
  }
  if ((!isa<ZExtInst>(TripCountInst) && !isa<SExtInst>(TripCountInst)) ||
      SE->getSCEV(TripCountInst->getOperand(0)) != SCEVTripCount) {
    LLVM_DEBUG(dbgs() << "Could not find valid extended trip count\n");
    return false;
  }
  return setLoopComponents(RHS, TripCount, Increment, IterationInstructions);
}

static bool checkPHIs(FlattenInfo &FI, const TargetTransformInfo *TTI) {
  // All PHIs in the inner and outer headers must either be:
  // - The induction PHI, which we are going to rewrite as one induction in
  //   the new loop. This is already checked by findLoopComponents.
  // - An outer header PHI with all incoming values from outside the loop.
  //   LoopSimplify guarantees we have a pre-header, so we don't need to
  //   worry about that here.
  // - Pairs of PHIs in the inner and outer headers, which implement a
  //   loop-carried dependency that will still be valid in the new loop. To
  //   be valid, this variable must be modified only in the inner loop.

  // The set of PHI nodes in the outer loop header that we know will still be
  // valid after the transformation. These will not need to be modified (with
  // the exception of the induction variable), but we do need to check that
  // there are no unsafe PHI nodes.
  SmallPtrSet<PHINode *, 4> SafeOuterPHIs;
  SafeOuterPHIs.insert(FI.OuterInductionPHI);

  // Check that all PHI nodes in the inner loop header match one of the valid
  // patterns.
  for (PHINode &InnerPHI : FI.InnerLoop->getHeader()->phis()) {
    // The induction PHIs break these rules, and that's OK because we treat
    // them specially when doing the transformation.
    if (&InnerPHI == FI.InnerInductionPHI)
      continue;
    if (FI.isNarrowInductionPhi(&InnerPHI))
      continue;

    // Each inner loop PHI node must have two incoming values/blocks - one
    // from the pre-header, and one from the latch.
    assert(InnerPHI.getNumIncomingValues() == 2);
    Value *PreHeaderValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopPreheader());
    Value *LatchValue =
        InnerPHI.getIncomingValueForBlock(FI.InnerLoop->getLoopLatch());

    // The incoming value from the outer loop must be the PHI node in the
    // outer loop header, with no modifications made in the top of the outer
    // loop.
    PHINode *OuterPHI = dyn_cast<PHINode>(PreHeaderValue);
    if (!OuterPHI || OuterPHI->getParent() != FI.OuterLoop->getHeader()) {
      LLVM_DEBUG(dbgs() << "value modified in top of outer loop\n");
      return false;
    }

    // The other incoming value must come from the inner loop, without any
    // modifications in the tail end of the outer loop. We are in LCSSA form,
    // so this will actually be a PHI in the inner loop's exit block, which
    // only uses values from inside the inner loop.
    PHINode *LCSSAPHI = dyn_cast<PHINode>(
        OuterPHI->getIncomingValueForBlock(FI.OuterLoop->getLoopLatch()));
    if (!LCSSAPHI) {
      LLVM_DEBUG(dbgs() << "could not find LCSSA PHI\n");
      return false;
    }

    // The value used by the LCSSA PHI must be the same one that the inner
    // loop's PHI uses.
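    // (hasConstantValue() returns the single value the PHI always merges, or
    // null if its incoming values differ, so an LCSSA PHI with conflicting
    // incoming values also fails this check.)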
    if (LCSSAPHI->hasConstantValue() != LatchValue) {
      LLVM_DEBUG(
          dbgs() << "LCSSA PHI incoming value does not match latch value\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "PHI pair is safe:\n");
    LLVM_DEBUG(dbgs() << "  Inner: "; InnerPHI.dump());
    LLVM_DEBUG(dbgs() << "  Outer: "; OuterPHI->dump());
    SafeOuterPHIs.insert(OuterPHI);
    FI.InnerPHIsToTransform.insert(&InnerPHI);
  }

  for (PHINode &OuterPHI : FI.OuterLoop->getHeader()->phis()) {
    if (FI.isNarrowInductionPhi(&OuterPHI))
      continue;
    if (!SafeOuterPHIs.count(&OuterPHI)) {
      LLVM_DEBUG(dbgs() << "found unsafe PHI in outer loop: "; OuterPHI.dump());
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "checkPHIs: OK\n");
  return true;
}

static bool
checkOuterLoopInsts(FlattenInfo &FI,
                    SmallPtrSetImpl<Instruction *> &IterationInstructions,
                    const TargetTransformInfo *TTI) {
  // Check for instructions in the outer but not inner loop. If any of these
  // have side-effects then this transformation is not legal, and if there is
  // a significant amount of code here which can't be optimised out then it's
  // not profitable (as these instructions would get executed for each
  // iteration of the inner loop).
  InstructionCost RepeatedInstrCost = 0;
  for (auto *B : FI.OuterLoop->getBlocks()) {
    if (FI.InnerLoop->contains(B))
      continue;

    for (auto &I : *B) {
      if (!isa<PHINode>(&I) && !I.isTerminator() &&
          !isSafeToSpeculativelyExecute(&I)) {
        LLVM_DEBUG(dbgs() << "Cannot flatten because instruction may have "
                             "side effects: ";
                   I.dump());
        return false;
      }
      // The execution count of the outer loop's iteration instructions
      // (increment, compare and branch) will be increased, but the
      // equivalent instructions will be removed from the inner loop, so
      // they make a net difference of zero.
      if (IterationInstructions.count(&I))
        continue;
      // The unconditional branch to the inner loop's header will turn into
      // a fall-through, so adds no cost.
      BranchInst *Br = dyn_cast<BranchInst>(&I);
      if (Br && Br->isUnconditional() &&
          Br->getSuccessor(0) == FI.InnerLoop->getHeader())
        continue;
      // Multiplies of the outer iteration variable and inner iteration
      // count will be optimised out.
      if (match(&I, m_c_Mul(m_Specific(FI.OuterInductionPHI),
                            m_Specific(FI.InnerTripCount))))
        continue;
      InstructionCost Cost =
          TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
      LLVM_DEBUG(dbgs() << "Cost " << Cost << ": "; I.dump());
      RepeatedInstrCost += Cost;
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of instructions that will be repeated: "
                    << RepeatedInstrCost << "\n");
  // Bail out if flattening the loops would cause instructions in the outer
  // loop but not in the inner loop to be executed extra times.
  if (RepeatedInstrCost > RepeatedInstructionThreshold) {
    LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: not profitable, bailing.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "checkOuterLoopInsts: OK\n");
  return true;
}

static bool checkIVUsers(FlattenInfo &FI) {
  // We require all uses of both induction variables to match this pattern:
  //
  //   (OuterPHI * InnerTripCount) + InnerPHI
  //
  // Any uses of the induction variables not matching that pattern would
  // require a div/mod to reconstruct in the flattened loop, so the
  // transformation wouldn't be profitable.
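  //
  // As an illustrative (hypothetical) example, the matched IR typically looks
  // like:
  //   %mul = mul i32 %outer.iv, %inner.tripcount
  //   %idx = add i32 %mul, %inner.iv
  // possibly with truncs of the PHIs if the IVs have been widened.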

  Value *InnerTripCount = FI.InnerTripCount;
  if (FI.Widened &&
      (isa<SExtInst>(InnerTripCount) || isa<ZExtInst>(InnerTripCount)))
    InnerTripCount = cast<Instruction>(InnerTripCount)->getOperand(0);

  // Check that all uses of the inner loop's induction variable match the
  // expected pattern, recording the uses of the outer IV.
  SmallPtrSet<Value *, 4> ValidOuterPHIUses;
  for (User *U : FI.InnerInductionPHI->users()) {
    if (U == FI.InnerIncrement)
      continue;

    // After widening the IVs, a trunc instruction might have been introduced,
    // so look through truncs.
    if (isa<TruncInst>(U)) {
      if (!U->hasOneUse())
        return false;
      U = *U->user_begin();
    }

    // If the use is in the compare (which is also the condition of the inner
    // branch) then the compare has been altered by another transformation,
    // e.g. icmp ult %inc, tripcount -> icmp ult %j, tripcount-1, where
    // tripcount is a constant. Ignore this use as the compare gets removed
    // later anyway.
    if (U == FI.InnerBranch->getCondition())
      continue;

    LLVM_DEBUG(dbgs() << "Found use of inner induction variable: "; U->dump());

    Value *MatchedMul = nullptr;
    Value *MatchedItCount = nullptr;
    bool IsAdd = match(U, m_c_Add(m_Specific(FI.InnerInductionPHI),
                                  m_Value(MatchedMul))) &&
                 match(MatchedMul, m_c_Mul(m_Specific(FI.OuterInductionPHI),
                                           m_Value(MatchedItCount)));

    // Matches the same pattern as above, except it also looks for truncs
    // on the phi, which can be the result of widening the induction variables.
    bool IsAddTrunc =
        match(U, m_c_Add(m_Trunc(m_Specific(FI.InnerInductionPHI)),
                         m_Value(MatchedMul))) &&
        match(MatchedMul, m_c_Mul(m_Trunc(m_Specific(FI.OuterInductionPHI)),
                                  m_Value(MatchedItCount)));

    if (!MatchedItCount)
      return false;
    // Look through extends if the IV has been widened.
    if (FI.Widened &&
        (isa<SExtInst>(MatchedItCount) || isa<ZExtInst>(MatchedItCount))) {
      assert(MatchedItCount->getType() == FI.InnerInductionPHI->getType() &&
             "Unexpected type mismatch in types after widening");
      MatchedItCount = isa<SExtInst>(MatchedItCount)
                           ? dyn_cast<SExtInst>(MatchedItCount)->getOperand(0)
                           : dyn_cast<ZExtInst>(MatchedItCount)->getOperand(0);
    }

    if ((IsAdd || IsAddTrunc) && MatchedItCount == InnerTripCount) {
      LLVM_DEBUG(dbgs() << "Use is optimisable\n");
      ValidOuterPHIUses.insert(MatchedMul);
      FI.LinearIVUses.insert(U);
    } else {
      LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
      return false;
    }
  }

  // Check that there are no uses of the outer IV other than the ones found
  // as part of the pattern above.
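  // (If the IVs were widened, a trunc of the outer IV may sit between the
  // PHI and the matched multiply; such truncs are looked through below and
  // their users are validated instead.)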
  for (User *U : FI.OuterInductionPHI->users()) {
    if (U == FI.OuterIncrement)
      continue;

    auto IsValidOuterPHIUses = [&] (User *U) -> bool {
      LLVM_DEBUG(dbgs() << "Found use of outer induction variable: "; U->dump());
      if (!ValidOuterPHIUses.count(U)) {
        LLVM_DEBUG(dbgs() << "Did not match expected pattern, bailing\n");
        return false;
      }
      LLVM_DEBUG(dbgs() << "Use is optimisable\n");
      return true;
    };

    if (auto *V = dyn_cast<TruncInst>(U)) {
      for (auto *K : V->users()) {
        if (!IsValidOuterPHIUses(K))
          return false;
      }
      continue;
    }

    if (!IsValidOuterPHIUses(U))
      return false;
  }

  LLVM_DEBUG(dbgs() << "checkIVUsers: OK\n";
             dbgs() << "Found " << FI.LinearIVUses.size()
                    << " value(s) that can be replaced:\n";
             for (Value *V : FI.LinearIVUses) {
               dbgs() << "  ";
               V->dump();
             });
  return true;
}

// Return an OverflowResult dependent on whether overflow of the multiplication
// of InnerTripCount and OuterTripCount can be assumed not to happen.
static OverflowResult checkOverflow(FlattenInfo &FI, DominatorTree *DT,
                                    AssumptionCache *AC) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  // For debugging/testing.
  if (AssumeNoOverflow)
    return OverflowResult::NeverOverflows;

  // Check if the multiply could not overflow due to known ranges of the
  // input values.
  OverflowResult OR = computeOverflowForUnsignedMul(
      FI.InnerTripCount, FI.OuterTripCount, DL, AC,
      FI.OuterLoop->getLoopPreheader()->getTerminator(), DT);
  if (OR != OverflowResult::MayOverflow)
    return OR;

  for (Value *V : FI.LinearIVUses) {
    for (Value *U : V->users()) {
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        for (Value *GEPUser : U->users()) {
          auto *GEPUserInst = cast<Instruction>(GEPUser);
          if (!isa<LoadInst>(GEPUserInst) &&
              !(isa<StoreInst>(GEPUserInst) &&
                GEP == GEPUserInst->getOperand(1)))
            continue;
          if (!isGuaranteedToExecuteForEveryIteration(GEPUserInst,
                                                      FI.InnerLoop))
            continue;
          // The IV is used as the operand of a GEP which dominates the loop
          // latch, and the IV is at least as wide as the address space of the
          // GEP. In this case, the GEP would wrap around the address space
          // before the IV increment wraps, which would be UB.
          if (GEP->isInBounds() &&
              V->getType()->getIntegerBitWidth() >=
                  DL.getPointerTypeSizeInBits(GEP->getType())) {
            LLVM_DEBUG(
                dbgs() << "use of linear IV would be UB if overflow occurred: ";
                GEP->dump());
            return OverflowResult::NeverOverflows;
          }
        }
      }
    }
  }

  return OverflowResult::MayOverflow;
}

static bool CanFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                               ScalarEvolution *SE, AssumptionCache *AC,
                               const TargetTransformInfo *TTI) {
  SmallPtrSet<Instruction *, 8> IterationInstructions;
  if (!findLoopComponents(FI.InnerLoop, IterationInstructions,
                          FI.InnerInductionPHI, FI.InnerTripCount,
                          FI.InnerIncrement, FI.InnerBranch, SE, FI.Widened))
    return false;
  if (!findLoopComponents(FI.OuterLoop, IterationInstructions,
                          FI.OuterInductionPHI, FI.OuterTripCount,
                          FI.OuterIncrement, FI.OuterBranch, SE, FI.Widened))
    return false;

  // Both of the loop trip count values must be invariant in the outer loop
  // (non-instructions are all inherently invariant).
  if (!FI.OuterLoop->isLoopInvariant(FI.InnerTripCount)) {
    LLVM_DEBUG(dbgs() << "inner loop trip count not invariant\n");
    return false;
  }
  if (!FI.OuterLoop->isLoopInvariant(FI.OuterTripCount)) {
    LLVM_DEBUG(dbgs() << "outer loop trip count not invariant\n");
    return false;
  }

  if (!checkPHIs(FI, TTI))
    return false;

  // FIXME: it should be possible to handle different types correctly.
  if (FI.InnerInductionPHI->getType() != FI.OuterInductionPHI->getType())
    return false;

  if (!checkOuterLoopInsts(FI, IterationInstructions, TTI))
    return false;

  // Find the values in the loop that can be replaced with the linearized
  // induction variable, and check that there are no other uses of the inner
  // or outer induction variable. If there were, we could still do this
  // transformation, but we'd have to insert a div/mod to calculate the
  // original IVs, so it wouldn't be profitable.
  if (!checkIVUsers(FI))
    return false;

  LLVM_DEBUG(dbgs() << "CanFlattenLoopPair: OK\n");
  return true;
}

static bool DoFlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                              ScalarEvolution *SE, AssumptionCache *AC,
                              const TargetTransformInfo *TTI, LPMUpdater *U,
                              MemorySSAUpdater *MSSAU) {
  Function *F = FI.OuterLoop->getHeader()->getParent();
  LLVM_DEBUG(dbgs() << "Checks all passed, doing the transformation\n");
  {
    using namespace ore;
    OptimizationRemark Remark(DEBUG_TYPE, "Flattened", FI.InnerLoop->getStartLoc(),
                              FI.InnerLoop->getHeader());
    OptimizationRemarkEmitter ORE(F);
    Remark << "Flattened into outer loop";
    ORE.emit(Remark);
  }

  Value *NewTripCount = BinaryOperator::CreateMul(
      FI.InnerTripCount, FI.OuterTripCount, "flatten.tripcount",
      FI.OuterLoop->getLoopPreheader()->getTerminator());
  LLVM_DEBUG(dbgs() << "Created new trip count in preheader: ";
             NewTripCount->dump());

  // Fix up PHI nodes that take values from the inner loop back-edge, which
  // we are about to remove.
  FI.InnerInductionPHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());

  // The old PHIs will be optimised away later, but for now we can't leave
  // them in an invalid state, so we update them here too.
  for (PHINode *PHI : FI.InnerPHIsToTransform)
    PHI->removeIncomingValue(FI.InnerLoop->getLoopLatch());

  // Modify the trip count of the outer loop to be the product of the two
  // trip counts.
  cast<User>(FI.OuterBranch->getCondition())->setOperand(1, NewTripCount);

  // Replace the inner loop backedge with an unconditional branch to the exit.
  BasicBlock *InnerExitBlock = FI.InnerLoop->getExitBlock();
  BasicBlock *InnerExitingBlock = FI.InnerLoop->getExitingBlock();
  InnerExitingBlock->getTerminator()->eraseFromParent();
  BranchInst::Create(InnerExitBlock, InnerExitingBlock);

  // Update the DomTree and MemorySSA.
  DT->deleteEdge(InnerExitingBlock, FI.InnerLoop->getHeader());
  if (MSSAU)
    MSSAU->removeEdge(InnerExitingBlock, FI.InnerLoop->getHeader());

  // Replace all uses of the polynomial calculated from the two induction
  // variables with the new one.
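  // I.e. each matched (i*M + j) value is RAUW'd with the outer induction
  // variable, which now counts from 0 to N*M-1 (truncated back to the
  // original type if the IVs were widened).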
  IRBuilder<> Builder(FI.OuterInductionPHI->getParent()->getTerminator());
  for (Value *V : FI.LinearIVUses) {
    Value *OuterValue = FI.OuterInductionPHI;
    if (FI.Widened)
      OuterValue = Builder.CreateTrunc(FI.OuterInductionPHI, V->getType(),
                                       "flatten.trunciv");

    LLVM_DEBUG(dbgs() << "Replacing: "; V->dump(); dbgs() << "with: ";
               OuterValue->dump());
    V->replaceAllUsesWith(OuterValue);
  }

  // Tell LoopInfo, SCEV and the pass manager that the inner loop has been
  // deleted, and that any information they have about the outer loop is
  // invalidated.
  SE->forgetLoop(FI.OuterLoop);
  SE->forgetLoop(FI.InnerLoop);
  if (U)
    U->markLoopAsDeleted(*FI.InnerLoop, FI.InnerLoop->getName());
  LI->erase(FI.InnerLoop);

  // Increment statistic value.
  NumFlattened++;

  return true;
}

static bool CanWidenIV(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                       ScalarEvolution *SE, AssumptionCache *AC,
                       const TargetTransformInfo *TTI) {
  if (!WidenIV) {
    LLVM_DEBUG(dbgs() << "Widening the IVs is disabled\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Try widening the IVs\n");
  Module *M = FI.InnerLoop->getHeader()->getParent()->getParent();
  auto &DL = M->getDataLayout();
  auto *InnerType = FI.InnerInductionPHI->getType();
  auto *OuterType = FI.OuterInductionPHI->getType();
  unsigned MaxLegalSize = DL.getLargestLegalIntTypeSizeInBits();
  auto *MaxLegalType = DL.getLargestLegalIntType(M->getContext());

  // If both induction types are less than the maximum legal integer width,
  // promote both to the widest type available so we know calculating
  // (OuterTripCount * InnerTripCount) as the new trip count is safe.
  if (InnerType != OuterType ||
      InnerType->getScalarSizeInBits() >= MaxLegalSize ||
      MaxLegalType->getScalarSizeInBits() <
          InnerType->getScalarSizeInBits() * 2) {
    LLVM_DEBUG(dbgs() << "Can't widen the IV\n");
    return false;
  }

  SCEVExpander Rewriter(*SE, DL, "loopflatten");
  SmallVector<WeakTrackingVH, 4> DeadInsts;
  unsigned ElimExt = 0;
  unsigned Widened = 0;

  auto CreateWideIV = [&](WideIVInfo WideIV, bool &Deleted) -> bool {
    PHINode *WidePhi =
        createWideIV(WideIV, LI, SE, Rewriter, DT, DeadInsts, ElimExt, Widened,
                     true /* HasGuards */, true /* UsePostIncrementRanges */);
    if (!WidePhi)
      return false;
    LLVM_DEBUG(dbgs() << "Created wide phi: "; WidePhi->dump());
    LLVM_DEBUG(dbgs() << "Deleting old phi: "; WideIV.NarrowIV->dump());
    Deleted = RecursivelyDeleteDeadPHINode(WideIV.NarrowIV);
    return true;
  };

  bool Deleted;
  if (!CreateWideIV({FI.InnerInductionPHI, MaxLegalType, false}, Deleted))
    return false;
  // Add the narrow phi to the list, so that it will be adjusted later when
  // the transformation is performed.
  if (!Deleted)
    FI.InnerPHIsToTransform.insert(FI.InnerInductionPHI);

  if (!CreateWideIV({FI.OuterInductionPHI, MaxLegalType, false}, Deleted))
    return false;

  assert(Widened && "Widened IV expected");
  FI.Widened = true;

  // Save the old/narrow induction phis, which we need to ignore in checkPHIs.
  FI.NarrowInnerInductionPHI = FI.InnerInductionPHI;
  FI.NarrowOuterInductionPHI = FI.OuterInductionPHI;

  // After widening, rediscover all the loop components.
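  // (The widened PHIs replace the narrow ones, so the components cached in FI
  // from the first round are stale and need to be found again.)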
  return CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI);
}

static bool FlattenLoopPair(FlattenInfo &FI, DominatorTree *DT, LoopInfo *LI,
                            ScalarEvolution *SE, AssumptionCache *AC,
                            const TargetTransformInfo *TTI, LPMUpdater *U,
                            MemorySSAUpdater *MSSAU) {
  LLVM_DEBUG(
      dbgs() << "Loop flattening running on outer loop "
             << FI.OuterLoop->getHeader()->getName() << " and inner loop "
             << FI.InnerLoop->getHeader()->getName() << " in "
             << FI.OuterLoop->getHeader()->getParent()->getName() << "\n");

  if (!CanFlattenLoopPair(FI, DT, LI, SE, AC, TTI))
    return false;

  // Check if we can widen the induction variables to avoid overflow checks.
  bool CanFlatten = CanWidenIV(FI, DT, LI, SE, AC, TTI);

  // It can happen that after widening of the IV, flattening may not be
  // possible/happening, e.g. when it is deemed unprofitable. So bail here if
  // that is the case.
  // TODO: IV widening without performing the actual flattening transformation
  // is not ideal. While this codegen change should not matter much, it is an
  // unnecessary change which is better to avoid. It's unlikely this happens
  // often, because if it's unprofitable after widening, it should be
  // unprofitable before widening as checked in the first round of checks. But
  // 'RepeatedInstructionThreshold' is set to only 2, which can probably be
  // relaxed. Because this is making a code change (the IV widening, but not
  // the flattening), we return true here.
  if (FI.Widened && !CanFlatten)
    return true;

  // If we have widened and can perform the transformation, do that here.
  if (CanFlatten)
    return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);

  // Otherwise, if we haven't widened the IV, check if the new iteration
  // variable might overflow. In this case, we need to version the loop, and
  // select the original version at runtime if the iteration space is too
  // large.
  // TODO: We currently don't version the loop.
  OverflowResult OR = checkOverflow(FI, DT, AC);
  if (OR == OverflowResult::AlwaysOverflowsHigh ||
      OR == OverflowResult::AlwaysOverflowsLow) {
    LLVM_DEBUG(dbgs() << "Multiply would always overflow, so not profitable\n");
    return false;
  } else if (OR == OverflowResult::MayOverflow) {
    LLVM_DEBUG(dbgs() << "Multiply might overflow, not flattening\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Multiply cannot overflow, modifying loop in-place\n");
  return DoFlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
}

bool Flatten(LoopNest &LN, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
             AssumptionCache *AC, TargetTransformInfo *TTI, LPMUpdater *U,
             MemorySSAUpdater *MSSAU) {
  bool Changed = false;
  for (Loop *InnerLoop : LN.getLoops()) {
    auto *OuterLoop = InnerLoop->getParentLoop();
    if (!OuterLoop)
      continue;
    FlattenInfo FI(OuterLoop, InnerLoop);
    Changed |= FlattenLoopPair(FI, DT, LI, SE, AC, TTI, U, MSSAU);
  }
  return Changed;
}

PreservedAnalyses LoopFlattenPass::run(LoopNest &LN, LoopAnalysisManager &LAM,
                                       LoopStandardAnalysisResults &AR,
                                       LPMUpdater &U) {

  bool Changed = false;

  Optional<MemorySSAUpdater> MSSAU;
  if (AR.MSSA) {
    MSSAU = MemorySSAUpdater(AR.MSSA);
    if (VerifyMemorySSA)
      AR.MSSA->verifyMemorySSA();
  }

  // The loop flattening pass requires loops to be
  // in simplified form, and also needs LCSSA. Running
  // this pass will simplify all loops that contain inner loops,
  // regardless of whether anything ends up being flattened.
  Changed |= Flatten(LN, &AR.DT, &AR.LI, &AR.SE, &AR.AC, &AR.TTI, &U,
                     MSSAU.hasValue() ? MSSAU.getPointer() : nullptr);

  if (!Changed)
    return PreservedAnalyses::all();

  if (AR.MSSA && VerifyMemorySSA)
    AR.MSSA->verifyMemorySSA();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {
class LoopFlattenLegacyPass : public FunctionPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopFlattenLegacyPass() : FunctionPass(ID) {
    initializeLoopFlattenLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  // Possibly flatten loop L into its child.
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    getLoopAnalysisUsage(AU);
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addPreserved<AssumptionCacheTracker>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }
};
} // namespace

char LoopFlattenLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_END(LoopFlattenLegacyPass, "loop-flatten", "Flattens loops",
                    false, false)

FunctionPass *llvm::createLoopFlattenPass() {
  return new LoopFlattenLegacyPass();
}

bool LoopFlattenLegacyPass::runOnFunction(Function &F) {
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto &TTIP = getAnalysis<TargetTransformInfoWrapperPass>();
  auto *TTI = &TTIP.getTTI(F);
  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *MSSA = getAnalysisIfAvailable<MemorySSAWrapperPass>();

  Optional<MemorySSAUpdater> MSSAU;
  if (MSSA)
    MSSAU = MemorySSAUpdater(&MSSA->getMSSA());

  bool Changed = false;
  for (Loop *L : *LI) {
    auto LN = LoopNest::getLoopNest(*L, *SE);
    Changed |= Flatten(*LN, DT, LI, SE, AC, TTI, nullptr,
                       MSSAU.hasValue() ? MSSAU.getPointer() : nullptr);
  }
  return Changed;
}