//===- IndVarSimplify.cpp - Induction Variable Elimination ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into simpler forms suitable for subsequent
// analysis and transformation.
//
// If the trip count of a loop is computable, this pass also makes the following
// changes:
//   1. The exit condition for the loop is canonicalized to compare the
//      induction value against the exit value. This turns loops like:
//        'for (i = 7; i*i < 1000; ++i)' into 'for (i = 0; i != 25; ++i)'
//   2. Any use outside of the loop of an expression derived from the indvar
//      is changed to compute the derived value outside of the loop,
//      eliminating the dependence on the exit value of the induction
//      variable. If the only purpose of the loop is to compute the exit
//      value of some derived expression, this transformation will make the
//      loop dead.
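//
//      For instance, a loop that only computes 'sum += 4' for 'n' iterations,
//      followed by a use of 'sum' after the loop, can be rewritten so that the
//      use reads the expression '4*n' computed outside the loop, leaving the
//      loop itself with no remaining purpose.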
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

#define DEBUG_TYPE "indvars"

STATISTIC(NumWidened,  "Number of indvars widened");
STATISTIC(NumReplaced, "Number of exit values replaced");
STATISTIC(NumLFTR,     "Number of loop exit tests replaced");
STATISTIC(NumElimExt,  "Number of IV sign/zero extends eliminated");
STATISTIC(NumElimIV,   "Number of congruent IVs eliminated");

// Trip count verification can be enabled by default under NDEBUG if we
// implement a strong expression equivalence checker in SCEV. Until then, we
// use the verify-indvars flag, which may assert in some cases.
static cl::opt<bool> VerifyIndvars(
    "verify-indvars", cl::Hidden,
    cl::desc("Verify the ScalarEvolution result after running indvars"));

static cl::opt<bool> ReduceLiveIVs("liv-reduce", cl::Hidden,
                                   cl::desc("Reduce live induction variables."));

enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, AlwaysRepl };

static cl::opt<ReplaceExitVal> ReplaceExitValue(
    "replexitval", cl::Hidden, cl::init(OnlyCheapRepl),
    cl::desc("Choose the strategy to replace exit value in IndVarSimplify"),
    cl::values(clEnumValN(NeverRepl, "never", "never replace exit value"),
               clEnumValN(OnlyCheapRepl, "cheap",
                          "only replace exit value when the cost is cheap"),
               clEnumValN(AlwaysRepl, "always",
                          "always replace exit value whenever possible"),
               clEnumValEnd));

namespace {
struct RewritePhi;

class IndVarSimplify : public LoopPass {
  LoopInfo *LI;
  ScalarEvolution *SE;
  DominatorTree *DT;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;

  SmallVector<WeakVH, 16> DeadInsts;
  bool Changed;
public:

  static char ID; // Pass identification, replacement for typeid
  IndVarSimplify()
      : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr), TLI(nullptr),
        TTI(nullptr), Changed(false) {
    initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    getLoopAnalysisUsage(AU);
  }

private:
  void releaseMemory() override {
    DeadInsts.clear();
  }

  bool isValidRewrite(Value *FromVal, Value *ToVal);

  void handleFloatingPointIV(Loop *L, PHINode *PH);
  void rewriteNonIntegerIVs(Loop *L);

  void simplifyAndExtend(Loop *L, SCEVExpander &Rewriter, LoopInfo *LI);

  bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet);
  void rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter);
  void rewriteFirstIterationLoopExitValues(Loop *L);

  Value *linearFunctionTestReplace(Loop *L, const SCEV *BackedgeTakenCount,
                                   PHINode *IndVar, SCEVExpander &Rewriter);

  void sinkUnusedInvariants(Loop *L);

  Value *expandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S, Loop *L,
                            Instruction *InsertPt, Type *Ty);
};
}

char IndVarSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars",
                      "Induction Variable Simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(IndVarSimplify, "indvars",
                    "Induction Variable Simplification", false, false)

Pass *llvm::createIndVarSimplifyPass() {
  return new IndVarSimplify();
}

/// Return true if the SCEV expansion generated by the rewriter can replace the
/// original value. SCEV guarantees that it produces the same value, but the
/// way it is produced may be illegal IR. Ideally, this function will only be
/// called for verification.
bool IndVarSimplify::isValidRewrite(Value *FromVal, Value *ToVal) {
  // If an SCEV expression subsumed multiple pointers, its expansion could
  // reassociate the GEP changing the base pointer. This is illegal because the
  // final address produced by a GEP chain must be inbounds relative to its
  // underlying object. Otherwise basic alias analysis, among other things,
  // could fail in a dangerous way. Ultimately, SCEV will be improved to avoid
  // producing an expression involving multiple pointers.
  // Until then, we must bail out here.
  //
  // Retrieve the pointer operand of the GEP. Don't use GetUnderlyingObject
  // because it understands lcssa phis while SCEV does not.
  Value *FromPtr = FromVal;
  Value *ToPtr = ToVal;
  if (auto *GEP = dyn_cast<GEPOperator>(FromVal)) {
    FromPtr = GEP->getPointerOperand();
  }
  if (auto *GEP = dyn_cast<GEPOperator>(ToVal)) {
    ToPtr = GEP->getPointerOperand();
  }
  if (FromPtr != FromVal || ToPtr != ToVal) {
    // Quickly check the common case.
    if (FromPtr == ToPtr)
      return true;

    // SCEV may have rewritten an expression that produces the GEP's pointer
    // operand. That's ok as long as the pointer operand has the same base
    // pointer. Unlike GetUnderlyingObject(), getPointerBase() will find the
    // base of a recurrence. This handles the case in which SCEV expansion
    // converts a pointer type recurrence into a nonrecurrent pointer base
    // indexed by an integer recurrence.

    // If the GEP base pointer is a vector of pointers, abort.
    if (!FromPtr->getType()->isPointerTy() || !ToPtr->getType()->isPointerTy())
      return false;

    const SCEV *FromBase = SE->getPointerBase(SE->getSCEV(FromPtr));
    const SCEV *ToBase = SE->getPointerBase(SE->getSCEV(ToPtr));
    if (FromBase == ToBase)
      return true;

    DEBUG(dbgs() << "INDVARS: GEP rewrite bail out "
                 << *FromBase << " != " << *ToBase << "\n");

    return false;
  }
  return true;
}

/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);
    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }
  assert(InsertPt && "Missing phi operand");

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}

//===----------------------------------------------------------------------===//
// rewriteNonIntegerIVs and helpers. Prefer integer IVs.
//===----------------------------------------------------------------------===//

/// Convert APF to an integer, if possible.
static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) {
  bool isExact = false;
  // See if we can convert this to an int64_t.
  uint64_t UIntVal;
  if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero,
                           &isExact) != APFloat::opOK || !isExact)
    return false;
  IntVal = UIntVal;
  return true;
}

/// If the loop has a floating-point induction variable, insert a corresponding
/// integer induction variable if possible.
/// For example,
///   for (double i = 0; i < 10000; ++i)
///     bar(i)
/// is converted into
///   for (int i = 0; i < 10000; ++i)
///     bar((double)i);
///
void IndVarSimplify::handleFloatingPointIV(Loop *L, PHINode *PN) {
  unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
  unsigned BackEdge = IncomingEdge^1;

  // Check the incoming value.
  auto *InitValueVal = dyn_cast<ConstantFP>(PN->getIncomingValue(IncomingEdge));

  int64_t InitValue;
  if (!InitValueVal || !ConvertToSInt(InitValueVal->getValueAPF(), InitValue))
    return;

  // Check the IV increment. Reject this PN if the increment operation is not
  // an add, or if the increment value cannot be represented by an integer.
  auto *Incr = dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
  if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return;

  // If this is not an add of the PHI with a constantfp, or if the constant fp
  // is not an integer, bail out.
  ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
  int64_t IncValue;
  if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
      !ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
    return;

  // Check Incr uses. One user is PN and the other user is an exit condition
  // used by the conditional terminator.
  Value::user_iterator IncrUse = Incr->user_begin();
  Instruction *U1 = cast<Instruction>(*IncrUse++);
  if (IncrUse == Incr->user_end()) return;
  Instruction *U2 = cast<Instruction>(*IncrUse++);
  if (IncrUse != Incr->user_end()) return;

  // Find the exit condition, which is an fcmp. If it doesn't exist, or if it
  // isn't only used by a branch, we can't transform it.
  FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
  if (!Compare)
    Compare = dyn_cast<FCmpInst>(U2);
  if (!Compare || !Compare->hasOneUse() ||
      !isa<BranchInst>(Compare->user_back()))
    return;

  BranchInst *TheBr = cast<BranchInst>(Compare->user_back());

  // We need to verify that the branch actually controls the iteration count
  // of the loop. If not, the new IV can overflow and no one will notice.
  // The branch block must be in the loop and one of the successors must be
  // out of the loop.
  assert(TheBr->isConditional() && "Can't use fcmp if not conditional");
  if (!L->contains(TheBr->getParent()) ||
      (L->contains(TheBr->getSuccessor(0)) &&
       L->contains(TheBr->getSuccessor(1))))
    return;

  // If it isn't a comparison with an integer-as-fp (the exit value), we can't
  // transform it.
  ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
  int64_t ExitValue;
  if (ExitValueVal == nullptr ||
      !ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
    return;

  // Find the new predicate for the integer comparison.
  CmpInst::Predicate NewPred = CmpInst::BAD_ICMP_PREDICATE;
  switch (Compare->getPredicate()) {
  default: return;  // Unknown comparison.
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UEQ: NewPred = CmpInst::ICMP_EQ; break;
  case CmpInst::FCMP_ONE:
  case CmpInst::FCMP_UNE: NewPred = CmpInst::ICMP_NE; break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_UGT: NewPred = CmpInst::ICMP_SGT; break;
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGE: NewPred = CmpInst::ICMP_SGE; break;
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_ULT: NewPred = CmpInst::ICMP_SLT; break;
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULE: NewPred = CmpInst::ICMP_SLE; break;
  }

  // We convert the floating point induction variable to a signed i32 value if
  // we can. This is only safe if the comparison cannot overflow in a way that
  // the equivalent integer operations would fail to catch. Check for this now.
  // TODO: We could use i64 if it is native and the range requires it.

  // The start/stride/exit values must all fit in a signed i32.
  if (!isInt<32>(InitValue) || !isInt<32>(IncValue) || !isInt<32>(ExitValue))
    return;

  // If not actually striding (add x, 0.0), avoid touching the code.
  if (IncValue == 0)
    return;

  // Positive and negative strides have different safety conditions.
  if (IncValue > 0) {
    // If we have a positive stride, we require the init to be less than the
    // exit value.
    if (InitValue >= ExitValue)
      return;

    uint32_t Range = uint32_t(ExitValue-InitValue);
    // Check for infinite loop, either:
    // while (i <= Exit) or until (i > Exit)
    if (NewPred == CmpInst::ICMP_SLE || NewPred == CmpInst::ICMP_SGT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) < ExitValue)
      return;

  } else {
    // If we have a negative stride, we require the init to be greater than
    // the exit value.
    if (InitValue <= ExitValue)
      return;

    uint32_t Range = uint32_t(InitValue-ExitValue);
    // Check for infinite loop, either:
    // while (i >= Exit) or until (i < Exit)
    if (NewPred == CmpInst::ICMP_SGE || NewPred == CmpInst::ICMP_SLT) {
      if (++Range == 0) return;  // Range overflows.
    }

    unsigned Leftover = Range % uint32_t(-IncValue);

    // If this is an equality comparison, we require that the strided value
    // exactly land on the exit value, otherwise the IV condition will wrap
    // around and do things the fp IV wouldn't.
    if ((NewPred == CmpInst::ICMP_EQ || NewPred == CmpInst::ICMP_NE) &&
        Leftover != 0)
      return;

    // If the stride would wrap around the i32 before exiting, we can't
    // transform the IV.
    if (Leftover != 0 && int32_t(ExitValue+IncValue) > ExitValue)
      return;
  }
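  // For instance (illustrative values): with InitValue == 0, IncValue == 3,
  // ExitValue == 10, and an equality predicate, the fp IV steps
  // 0, 3, 6, 9, 12, ... and never equals 10.0, while a wrapping i32 IV
  // eventually would. The Leftover checks above reject exactly such cases.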
  IntegerType *Int32Ty = Type::getInt32Ty(PN->getContext());

  // Insert the new integer induction variable.
  PHINode *NewPHI = PHINode::Create(Int32Ty, 2, PN->getName()+".int", PN);
  NewPHI->addIncoming(ConstantInt::get(Int32Ty, InitValue),
                      PN->getIncomingBlock(IncomingEdge));

  Value *NewAdd =
      BinaryOperator::CreateAdd(NewPHI, ConstantInt::get(Int32Ty, IncValue),
                                Incr->getName()+".int", Incr);
  NewPHI->addIncoming(NewAdd, PN->getIncomingBlock(BackEdge));

  ICmpInst *NewCompare = new ICmpInst(TheBr, NewPred, NewAdd,
                                      ConstantInt::get(Int32Ty, ExitValue),
                                      Compare->getName());

  // In the following deletions, PN may become dead and may be deleted.
  // Use a WeakVH to observe whether this happens.
  WeakVH WeakPH = PN;

  // Delete the old floating point exit comparison. The branch starts using
  // the new comparison.
  NewCompare->takeName(Compare);
  Compare->replaceAllUsesWith(NewCompare);
  RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);

  // Delete the old floating point increment.
  Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
  RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);

  // If the FP induction variable still has uses, this is because something
  // else in the loop uses its value. In order to canonicalize the induction
  // variable, we chose to eliminate the IV and rewrite it in terms of an
  // int->fp cast.
  //
  // We give preference to sitofp over uitofp because it is faster on most
  // platforms.
  if (WeakPH) {
    Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
                                 &*PN->getParent()->getFirstInsertionPt());
    PN->replaceAllUsesWith(Conv);
    RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
  }
  Changed = true;
}
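// Schematically, for the 'for (double i = ...)' example above, the rewritten
// loop body looks like this (illustrative IR, names invented):
//   %i.int      = phi i32 [ 0, %preheader ], [ %i.int.next, %backedge ]
//   %i.int.next = add i32 %i.int, 1
//   %cmp        = icmp slt i32 %i.int.next, 10000
//   %i.fp       = sitofp i32 %i.int to double  ; only if the fp value is used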
void IndVarSimplify::rewriteNonIntegerIVs(Loop *L) {
  // First step. Check to see if there are any floating-point recurrences.
  // If there are, change them into integer recurrences, permitting analysis
  // by the SCEV routines.
  //
  BasicBlock *Header = L->getHeader();

  SmallVector<WeakVH, 8> PHIs;
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PHIs.push_back(PN);

  for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
    if (PHINode *PN = dyn_cast_or_null<PHINode>(&*PHIs[i]))
      handleFloatingPointIV(L, PN);

  // If the loop previously had a floating-point IV, ScalarEvolution
  // may not have been able to compute a trip count. Now that we've done some
  // re-writing, the trip count may be computable.
  if (Changed)
    SE->forgetLoop(L);
}

namespace {
// Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues.
struct RewritePhi {
  PHINode *PN;
  unsigned Ith;  // Ith incoming value.
  Value *Val;    // Exit value after expansion.
  bool HighCost; // True if the expansion is high cost.

  RewritePhi(PHINode *P, unsigned I, Value *V, bool H)
      : PN(P), Ith(I), Val(V), HighCost(H) {}
};
}

Value *IndVarSimplify::expandSCEVIfNeeded(SCEVExpander &Rewriter, const SCEV *S,
                                          Loop *L, Instruction *InsertPt,
                                          Type *ResultTy) {
  // Before expanding S into an expensive LLVM expression, see if we can use
  // an already existing value as the expansion for S.
  if (Value *ExistingValue = Rewriter.findExistingExpansion(S, InsertPt, L))
    if (ExistingValue->getType() == ResultTy)
      return ExistingValue;

  // We didn't find anything, fall back to using SCEVExpander.
  return Rewriter.expandCodeFor(S, ResultTy, InsertPt);
}

//===----------------------------------------------------------------------===//
// rewriteLoopExitValues - Optimize IV users outside the loop.
// As a side effect, reduces the amount of IV processing within the loop.
//===----------------------------------------------------------------------===//

/// Check to see if this loop has a computable loop-invariant execution count.
/// If so, this means that we can compute the final value of any expressions
/// that are recurrent in the loop, and substitute the exit values from the
/// loop into any instructions outside of the loop that use the final values
/// of the current expressions.
///
/// This is mostly redundant with the regular IndVarSimplify activities that
/// happen later, except that it's more powerful in some cases, because it's
/// able to brute-force evaluate arbitrary instructions as long as they have
/// constant operands at the beginning of the loop.
void IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
  // Check a pre-condition.
  assert(L->isRecursivelyLCSSAForm(*DT) && "Indvars did not preserve LCSSA!");

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);

  SmallVector<RewritePhi, 8> RewritePhiSet;
  // Find all values that are computed inside the loop, but used outside of
  // it. Because of LCSSA, these values will only occur in LCSSA PHI Nodes.
  // Scan the exit blocks of the loop to find them.
  for (BasicBlock *ExitBB : ExitBlocks) {
    // If there are no PHI nodes in this exit block, then no values defined
    // inside the loop are used on this path, skip it.
    PHINode *PN = dyn_cast<PHINode>(ExitBB->begin());
    if (!PN) continue;

    unsigned NumPreds = PN->getNumIncomingValues();

    // Iterate over all of the PHI nodes.
    BasicBlock::iterator BBI = ExitBB->begin();
    while ((PN = dyn_cast<PHINode>(BBI++))) {
      if (PN->use_empty())
        continue; // dead use, don't replace it

      if (!SE->isSCEVable(PN->getType()))
        continue;

      // It's necessary to tell ScalarEvolution about this explicitly so that
      // it can walk the def-use list and forget all SCEVs, as it may not be
      // watching the PHI itself. Once the new exit value is in place, there
      // may not be a def-use connection between the loop and every
      // instruction which got a SCEVAddRecExpr for that loop.
      SE->forgetValue(PN);

      // Iterate over all of the values in all the PHI nodes.
      for (unsigned i = 0; i != NumPreds; ++i) {
        // If the value being merged in is not an instruction (and hence is
        // not defined in the loop), skip it.
        Value *InVal = PN->getIncomingValue(i);
        if (!isa<Instruction>(InVal))
          continue;

        // If this pred is for a subloop, not L itself, skip it.
        if (LI->getLoopFor(PN->getIncomingBlock(i)) != L)
          continue; // The Block is in a subloop, skip it.

        // Check that InVal is defined in the loop.
        Instruction *Inst = cast<Instruction>(InVal);
        if (!L->contains(Inst))
          continue;

        // Okay, this instruction has a user outside of the current loop
        // and varies predictably *inside* the loop. Evaluate the value it
        // contains when the loop exits, if possible.
        const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop());
        if (!SE->isLoopInvariant(ExitValue, L) ||
            !isSafeToExpand(ExitValue, *SE))
          continue;

        // Computing the value outside of the loop brings no benefit if:
        //  - it is definitely used inside the loop in a way which cannot be
        //    optimized away.
        //  - no use outside of the loop can take advantage of hoisting the
        //    computation out of the loop.
        if (ExitValue->getSCEVType() >= scMulExpr) {
          unsigned NumHardInternalUses = 0;
          unsigned NumSoftExternalUses = 0;
          unsigned NumUses = 0;
          for (auto IB = Inst->user_begin(), IE = Inst->user_end();
               IB != IE && NumUses <= 6; ++IB) {
            Instruction *UseInstr = cast<Instruction>(*IB);
            unsigned Opc = UseInstr->getOpcode();
            NumUses++;
            if (L->contains(UseInstr)) {
              if (Opc == Instruction::Call || Opc == Instruction::Ret)
                NumHardInternalUses++;
            } else {
              if (Opc == Instruction::PHI) {
                // Do not count the Phi as a use. LCSSA may have inserted
                // plenty of trivial ones.
                NumUses--;
                for (auto PB = UseInstr->user_begin(),
                          PE = UseInstr->user_end();
                     PB != PE && NumUses <= 6; ++PB, ++NumUses) {
                  unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
                  if (PhiOpc != Instruction::Call &&
                      PhiOpc != Instruction::Ret)
                    NumSoftExternalUses++;
                }
                continue;
              }
              if (Opc != Instruction::Call && Opc != Instruction::Ret)
                NumSoftExternalUses++;
            }
          }
          if (NumUses <= 6 && NumHardInternalUses && !NumSoftExternalUses)
            continue;
        }

        bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst);
        Value *ExitVal =
            expandSCEVIfNeeded(Rewriter, ExitValue, L, Inst, PN->getType());

        DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal << '\n'
                     << "  LoopVal = " << *Inst << "\n");

        if (!isValidRewrite(Inst, ExitVal)) {
          DeadInsts.push_back(ExitVal);
          continue;
        }

        // Collect all the candidate PHINodes to be rewritten.
        RewritePhiSet.emplace_back(PN, i, ExitVal, HighCost);
      }
    }
  }

  bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);

  // Transformation.
  for (const RewritePhi &Phi : RewritePhiSet) {
    PHINode *PN = Phi.PN;
    Value *ExitVal = Phi.Val;

    // Only do the rewrite when the ExitValue can be expanded cheaply.
    // If LoopCanBeDel is true, rewrite the exit value aggressively.
    if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) {
      DeadInsts.push_back(ExitVal);
      continue;
    }

    Changed = true;
    ++NumReplaced;
    Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith));
    PN->setIncomingValue(Phi.Ith, ExitVal);

    // If this instruction is dead now, schedule it for deletion. Don't delete
    // it immediately, to avoid invalidating iterators.
    if (isInstructionTriviallyDead(Inst, TLI))
      DeadInsts.push_back(Inst);

    // Replace PN with ExitVal if that is legal and does not break LCSSA.
    if (PN->getNumIncomingValues() == 1 &&
        LI->replacementPreservesLCSSAForm(PN, ExitVal)) {
      PN->replaceAllUsesWith(ExitVal);
      PN->eraseFromParent();
    }
  }

  // The insertion point instruction may have been deleted; clear it out
  // so that the rewriter doesn't trip over it later.
  Rewriter.clearInsertPoint();
}
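// For example (schematically), given an LCSSA phi in the exit block:
//   %sum.lcssa = phi i32 [ %sum.next, %loop ]
// where %sum.next is an affine recurrence and the trip count is %n,
// getSCEVAtScope can fold the recurrence so that %sum.lcssa's incoming value
// becomes an expression of %n computed outside the loop.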
//===---------------------------------------------------------------------===//
// rewriteFirstIterationLoopExitValues: Rewrite loop exit values if we know
// they will exit at the first iteration.
//===---------------------------------------------------------------------===//

/// Check to see if this loop has loop-invariant conditions which lead to loop
/// exits. If so, we know that if the exit path is taken, it is at the first
/// loop iteration. This lets us predict exit values of PHI nodes that live in
/// the loop header.
void IndVarSimplify::rewriteFirstIterationLoopExitValues(Loop *L) {
  // Verify the input to the pass is already in LCSSA form.
  assert(L->isLCSSAForm(*DT));

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  auto *LoopHeader = L->getHeader();
  assert(LoopHeader && "Invalid loop");

  for (auto *ExitBB : ExitBlocks) {
    BasicBlock::iterator BBI = ExitBB->begin();
    // If there are no more PHI nodes in this exit block, then no more
    // values defined inside the loop are used on this path.
    while (auto *PN = dyn_cast<PHINode>(BBI++)) {
      for (unsigned IncomingValIdx = 0, E = PN->getNumIncomingValues();
           IncomingValIdx != E; ++IncomingValIdx) {
        auto *IncomingBB = PN->getIncomingBlock(IncomingValIdx);

        // We currently only support loop exits from the loop header. If the
        // incoming block is not the loop header, we would need to recursively
        // check that all conditions starting from the loop header are loop
        // invariant. Additional support might be added in the future.
        if (IncomingBB != LoopHeader)
          continue;

        // Get the condition that leads to the exit path.
        auto *TermInst = IncomingBB->getTerminator();

        Value *Cond = nullptr;
        if (auto *BI = dyn_cast<BranchInst>(TermInst)) {
          // Must be a conditional branch, otherwise the block
          // should not be in the loop.
          Cond = BI->getCondition();
        } else if (auto *SI = dyn_cast<SwitchInst>(TermInst))
          Cond = SI->getCondition();
        else
          continue;

        if (!L->isLoopInvariant(Cond))
          continue;

        auto *ExitVal =
            dyn_cast<PHINode>(PN->getIncomingValue(IncomingValIdx));

        // Only deal with PHIs.
        if (!ExitVal)
          continue;

        // If ExitVal is a PHI on the loop header, then we know its
        // value along this exit because the exit can only be taken
        // on the first iteration.
        auto *LoopPreheader = L->getLoopPreheader();
        assert(LoopPreheader && "Invalid loop");
        int PreheaderIdx = ExitVal->getBasicBlockIndex(LoopPreheader);
        if (PreheaderIdx != -1) {
          assert(ExitVal->getParent() == LoopHeader &&
                 "ExitVal must be in loop header");
          PN->setIncomingValue(IncomingValIdx,
                               ExitVal->getIncomingValue(PreheaderIdx));
        }
      }
    }
  }
}
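// For example, rewriteFirstIterationLoopExitValues handles (schematically):
//   header:
//     %p = phi i32 [ %init, %preheader ], [ %p.next, %latch ]
//     br i1 %invariant.cond, label %exit, label %body
//   exit:
//     %lcssa = phi i32 [ %p, %header ]
// Because %invariant.cond does not change, the exit can only be taken on the
// first iteration, so %lcssa can be rewritten to use %init directly.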
/// Check whether it is possible to delete the loop after rewriting the exit
/// value. If it is possible, ignore ReplaceExitValue and do rewriting
/// aggressively.
bool IndVarSimplify::canLoopBeDeleted(
    Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) {

  BasicBlock *Preheader = L->getLoopPreheader();
  // If there is no preheader, the loop will not be deleted.
  if (!Preheader)
    return false;

  // The LoopDeletion pass can delete loops with more than one exiting block,
  // but for simplicity we only handle the single-exiting-block case here.
  // TODO: If we encounter test cases where loops with multiple exiting blocks
  // become deletable after exit value rewriting, enhance the logic here.
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getUniqueExitBlocks(ExitBlocks);
  if (ExitBlocks.size() > 1 || ExitingBlocks.size() > 1)
    return false;

  BasicBlock *ExitBlock = ExitBlocks[0];
  BasicBlock::iterator BI = ExitBlock->begin();
  while (PHINode *P = dyn_cast<PHINode>(BI)) {
    Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]);

    // If the Incoming value of P is found in RewritePhiSet, we know it
    // could be rewritten to use a loop invariant value in the transformation
    // phase later. Skip it in the loop invariant check below.
    bool found = false;
    for (const RewritePhi &Phi : RewritePhiSet) {
      unsigned i = Phi.Ith;
      if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
        found = true;
        break;
      }
    }

    Instruction *I;
    if (!found && (I = dyn_cast<Instruction>(Incoming)))
      if (!L->hasLoopInvariantOperands(I))
        return false;

    ++BI;
  }

  for (auto *BB : L->blocks())
    if (any_of(*BB, [](Instruction &I) { return I.mayHaveSideEffects(); }))
      return false;

  return true;
}

//===----------------------------------------------------------------------===//
// IV Widening - Extend the width of an IV to cover its widest uses.
//===----------------------------------------------------------------------===//

namespace {
// Collect information about induction variables that are used by sign/zero
// extend operations. This information is recorded by visitIVCast and provides
// the input to WidenIV.
struct WideIVInfo {
  PHINode *NarrowIV = nullptr;
  Type *WidestNativeType = nullptr; // Widest integer type created by a [sz]ext.
  bool IsSigned = false;            // Was a sext user seen before a zext?
};
}

/// Update information about the induction variable that is extended by this
/// sign or zero extend operation. This is used to determine the final width of
/// the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
                        const TargetTransformInfo *TTI) {
  bool IsSigned = Cast->getOpcode() == Instruction::SExt;
  if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
    return;

  Type *Ty = Cast->getType();
  uint64_t Width = SE->getTypeSizeInBits(Ty);
  if (!Cast->getModule()->getDataLayout().isLegalInteger(Width))
    return;

  // Cast is either an sext or zext up to this point.
  // We should not widen an indvar if arithmetic on the wider indvar is more
  // expensive than arithmetic on the narrower indvar. We check only the cost
  // of ADD because at least an ADD is required to increment the induction
  // variable. We could compute more comprehensively the cost of all
  // instructions on the induction variable when necessary.
  if (TTI &&
      TTI->getArithmeticInstrCost(Instruction::Add, Ty) >
          TTI->getArithmeticInstrCost(Instruction::Add,
                                      Cast->getOperand(0)->getType())) {
    return;
  }

  if (!WI.WidestNativeType) {
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
    WI.IsSigned = IsSigned;
    return;
  }

  // We extend the IV to satisfy the sign of its first user, arbitrarily.
  if (WI.IsSigned != IsSigned)
    return;

  if (Width > SE->getTypeSizeInBits(WI.WidestNativeType))
    WI.WidestNativeType = SE->getEffectiveSCEVType(Ty);
}

namespace {

/// Record a link in the Narrow IV def-use chain along with the WideIV that
/// computes the same value as the Narrow IV def. This avoids caching Use*
/// pointers.
struct NarrowIVDefUse {
  Instruction *NarrowDef = nullptr;
  Instruction *NarrowUse = nullptr;
  Instruction *WideDef = nullptr;

  // True if the narrow def is never negative. Tracking this information lets
  // us use a sign extension instead of a zero extension or vice versa, when
  // profitable and legal.
  bool NeverNegative = false;

  NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                 bool NeverNegative)
      : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
        NeverNegative(NeverNegative) {}
};

/// The goal of this transform is to remove sign and zero extends without
/// creating any new induction variables. To do this, it creates a new phi of
/// the wider type and redirects all users, either removing extends or
/// inserting truncs whenever we stop propagating the type.
class WidenIV {
  // Parameters
  PHINode *OrigPhi;
  Type *WideType;
  bool IsSigned;

  // Context
  LoopInfo *LI;
  Loop *L;
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Result
  PHINode *WidePhi;
  Instruction *WideInc;
  const SCEV *WideIncExpr;
  SmallVectorImpl<WeakVH> &DeadInsts;

  SmallPtrSet<Instruction*,16> Widened;
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;

public:
  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo,
          ScalarEvolution *SEv, DominatorTree *DTree,
          SmallVectorImpl<WeakVH> &DI) :
    OrigPhi(WI.NarrowIV),
    WideType(WI.WidestNativeType),
    IsSigned(WI.IsSigned),
    LI(LInfo),
    L(LI->getLoopFor(OrigPhi->getParent())),
    SE(SEv),
    DT(DTree),
    WidePhi(nullptr),
    WideInc(nullptr),
    WideIncExpr(nullptr),
    DeadInsts(DI) {
    assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  }

  PHINode *createWideIV(SCEVExpander &Rewriter);

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  const SCEVAddRecExpr *getWideRecurrence(Instruction *NarrowUse);

  const SCEVAddRecExpr *getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter);

  bool widenLoopCompare(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);
};
} // anonymous namespace
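// Schematically, widening an i32 IV to i64 (illustrative IR, names invented):
//   %iv = phi i32 ...             -->  %iv.wide = phi i64 ...
//   %ext = sext i32 %iv to i64    -->  all uses redirected to %iv.wide
//   <use that cannot be widened>  -->  fed by 'trunc i64 %iv.wide to i32'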
/// Perform a quick domtree based check for loop invariance assuming that V is
/// used within the loop. LoopInfo::isLoopInvariant() seems gratuitous for this
/// purpose.
static bool isLoopInvariant(Value *V, const Loop *L, const DominatorTree *DT) {
  Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  return DT->properlyDominates(Inst->getParent(), L->getHeader());
}

Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
                                 bool IsSigned, Instruction *Use) {
  // Set the debug location and conservative insertion point.
  IRBuilder<> Builder(Use);
  // Hoist the insertion point into loop preheaders as far as possible.
  for (const Loop *L = LI->getLoopFor(Use->getParent());
       L && L->getLoopPreheader() && isLoopInvariant(NarrowOper, L, DT);
       L = L->getParentLoop())
    Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());

  return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
                    Builder.CreateZExt(NarrowOper, WideType);
}

/// Instantiate a wide operation to replace a narrow operation. This only
/// needs to handle operations that can evaluate to SCEVAddRec. It can safely
/// return nullptr for any operation we decide not to clone.
Instruction *WidenIV::cloneIVUser(NarrowIVDefUse DU,
                                  const SCEVAddRecExpr *WideAR) {
  unsigned Opcode = DU.NarrowUse->getOpcode();
  switch (Opcode) {
  default:
    return nullptr;
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Sub:
    return cloneArithmeticIVUser(DU, WideAR);

  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return cloneBitwiseIVUser(DU);
  }
}

Instruction *WidenIV::cloneBitwiseIVUser(NarrowIVDefUse DU) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");

  // Replace NarrowDef operands with WideDef. Otherwise, we don't know
  // anything about the narrow operand yet so must insert a [sz]ext. It is
  // probably loop invariant and will be folded or hoisted. If it actually
  // comes from a widened IV, it should be removed during a future call to
  // widenIVUse.
  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      IsSigned, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      IsSigned, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());
  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}

Instruction *WidenIV::cloneArithmeticIVUser(NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;
  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and
  // zext(NonIVNarrowDef), and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse = nullptr;

    switch (NarrowUse->getOpcode()) {
    default:
      llvm_unreachable("No other possibility!");

    case Instruction::Add:
      WideUse = SE->getAddExpr(WideLHS, WideRHS);
      break;

    case Instruction::Mul:
      WideUse = SE->getMulExpr(WideLHS, WideRHS);
      break;

    case Instruction::UDiv:
      WideUse = SE->getUDivExpr(WideLHS, WideRHS);
      break;

    case Instruction::Sub:
      WideUse = SE->getMinusSCEV(WideLHS, WideRHS);
      break;
    }

    return WideUse == WideAR;
  };

  bool SignExtend = IsSigned;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
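// In cloneArithmeticIVUser above, for instance, if the narrow use is
// 'add i32 %iv, %n' and SCEV agrees that 'WideDef + sext(%n)' equals the wide
// recurrence WideAR, then sext(%n) is a correct guess for X; otherwise
// zext(%n) is tried before giving up.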
const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                                     unsigned OpCode) const {
  if (OpCode == Instruction::Add)
    return SE->getAddExpr(LHS, RHS);
  if (OpCode == Instruction::Sub)
    return SE->getMinusSCEV(LHS, RHS);
  if (OpCode == Instruction::Mul)
    return SE->getMulExpr(LHS, RHS);

  llvm_unreachable("Unsupported opcode.");
}

/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return it.
const SCEVAddRecExpr *WidenIV::getExtendedOperandRecurrence(NarrowIVDefUse DU) {

  // Handle the common case of add<nsw/nuw>.
  const unsigned OpCode = DU.NarrowUse->getOpcode();
  // Only Add/Sub/Mul instructions are supported yet.
  if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
      OpCode != Instruction::Mul)
    return nullptr;

  // One operand (NarrowDef) has already been extended to WideDef. Now
  // determine whether extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx =
      DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
  assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");

  const SCEV *ExtendOperExpr = nullptr;
  const OverflowingBinaryOperator *OBO =
      cast<OverflowingBinaryOperator>(DU.NarrowUse);
  if (IsSigned && OBO->hasNoSignedWrap())
    ExtendOperExpr = SE->getSignExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else if (!IsSigned && OBO->hasNoUnsignedWrap())
    ExtendOperExpr = SE->getZeroExtendExpr(
        SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
  else
    return nullptr;

  // When creating this SCEV expr, don't apply the current operation's NSW or
  // NUW flags. This instruction may be guarded by control flow that the
  // no-wrap behavior depends on. Non-control-equivalent instructions can be
  // mapped to the same SCEV expression, and it would be incorrect to transfer
  // NSW/NUW semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, OpCode));

  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;
  return AddRec;
}

/// Is this instruction potentially interesting for further simplification
/// after widening its type? In other words, can the extend be safely hoisted
/// out of the loop, with SCEV reducing the value to a recurrence on the same
/// loop? If so, return the sign or zero extended recurrence.
/// Otherwise return nullptr.
const SCEVAddRecExpr *WidenIV::getWideRecurrence(Instruction *NarrowUse) {
  if (!SE->isSCEVable(NarrowUse->getType()))
    return nullptr;

  const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return nullptr;
  }

  const SCEV *WideExpr = IsSigned ?
      SE->getSignExtendExpr(NarrowExpr, WideType) :
      SE->getZeroExtendExpr(NarrowExpr, WideType);
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;
  return AddRec;
}
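// For example, in getWideRecurrence above, if the narrow expression is
// {0,+,1}<nsw> over L, its sign extension to the wide type folds to an addrec
// {0,+,1} on L and the use can be widened; if the extension remains a sext
// wrapped around the narrow expression, it cannot.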
/// This IV user cannot be widened. Replace this use of the original narrow IV
/// with a truncation of the new wide IV to isolate and eliminate the narrow
/// IV.
static void truncateIVUse(NarrowIVDefUse DU, DominatorTree *DT, LoopInfo *LI) {
  DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef
               << " for user " << *DU.NarrowUse << "\n");
  IRBuilder<> Builder(
      getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI));
  Value *Trunc = Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType());
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
}

/// If the narrow use is a compare instruction, then widen the compare
/// (and possibly the other operand). The extend operation is hoisted into the
/// loop preheader as far as possible.
bool WidenIV::widenLoopCompare(NarrowIVDefUse DU) {
  ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
  if (!Cmp)
    return false;

  // We can legally widen the comparison in the following two cases:
  //
  //  - The signedness of the IV extension and comparison match
  //
  //  - The narrow IV is always positive (and thus its sign extension is equal
  //    to its zero extension). For instance, let's say we're zero extending
  //    %narrow for the following use
  //
  //      icmp slt i32 %narrow, %val   ... (A)
  //
  //    and %narrow is always positive. Then
  //
  //      (A) == icmp slt i32 sext(%narrow), sext(%val)
  //          == icmp slt i32 zext(%narrow), sext(%val)

  if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
    return false;

  Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
  unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
  unsigned IVWidth = SE->getTypeSizeInBits(WideType);
  assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");

  // Widen the compare instruction.
  IRBuilder<> Builder(
      getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI));
  DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);

  // Widen the other operand of the compare, if necessary.
  if (CastWidth < IVWidth) {
    Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
    DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
  }
  return true;
}

/// Determine whether an individual user of the narrow IV can be widened. If
/// so, return the wide clone of the user.
Instruction *WidenIV::widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {

  // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
  if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
    if (LI->getLoopFor(UsePhi->getParent()) != L) {
      // For LCSSA phis, sink the truncate outside the loop.
      // After SimplifyCFG most loop exit targets have a single predecessor.
      // Otherwise fall back to a truncate within the loop.
      if (UsePhi->getNumOperands() != 1)
        truncateIVUse(DU, DT, LI);
      else {
        PHINode *WidePhi =
            PHINode::Create(DU.WideDef->getType(), 1,
                            UsePhi->getName() + ".wide", UsePhi);
        WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
        IRBuilder<> Builder(&*WidePhi->getParent()->getFirstInsertionPt());
        Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType());
        UsePhi->replaceAllUsesWith(Trunc);
        DeadInsts.emplace_back(UsePhi);
        DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
                     << " to " << *WidePhi << "\n");
      }
      return nullptr;
    }
  }
  // Our raison d'etre! Eliminate sign and zero extension.
  if (IsSigned ? isa<SExtInst>(DU.NarrowUse) : isa<ZExtInst>(DU.NarrowUse)) {
    Value *NewDef = DU.WideDef;
    if (DU.NarrowUse->getType() != WideType) {
      unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
      unsigned IVWidth = SE->getTypeSizeInBits(WideType);
      if (CastWidth < IVWidth) {
        // The cast isn't as wide as the IV, so insert a Trunc.
        IRBuilder<> Builder(DU.NarrowUse);
        NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType());
      }
      else {
        // A wider extend was hidden behind a narrower one. This may induce
        // another round of IV widening in which the intermediate IV becomes
        // dead. It should be very rare.
        DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
                     << " not wide enough to subsume " << *DU.NarrowUse
                     << "\n");
        DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
        NewDef = DU.NarrowUse;
      }
    }
    if (NewDef != DU.NarrowUse) {
      DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
                   << " replaced by " << *DU.WideDef << "\n");
      ++NumElimExt;
      DU.NarrowUse->replaceAllUsesWith(NewDef);
      DeadInsts.emplace_back(DU.NarrowUse);
    }
    // Now that the extend is gone, we want to expose its uses for potential
    // further simplification. We don't need to directly inform
    // SimplifyIVUsers of the new users, because their parent IV will be
    // processed later as a new loop phi. If we preserved IVUsers analysis, we
    // would also want to push the uses of WideDef here.

    // No further widening is needed. The deceased [sz]ext had done it for us.
    return nullptr;
  }

  // Does this user itself evaluate to a recurrence after widening?
  const SCEVAddRecExpr *WideAddRec = getWideRecurrence(DU.NarrowUse);
  if (!WideAddRec)
    WideAddRec = getExtendedOperandRecurrence(DU);

  if (!WideAddRec) {
    // If the use is a loop condition, try to promote the condition instead of
    // truncating the IV first.
    if (widenLoopCompare(DU))
      return nullptr;

    // This user does not evaluate to a recurrence after widening, so don't
    // follow it. Instead insert a Trunc to kill off the original use,
    // eventually isolating the original narrow IV so it can be removed.
    truncateIVUse(DU, DT, LI);
    return nullptr;
  }
  // Assume block terminators cannot evaluate to a recurrence. We can't insert
  // a Trunc after a terminator if there happens to be a critical edge.
  assert(DU.NarrowUse != DU.NarrowUse->getParent()->getTerminator() &&
         "SCEV is not expected to evaluate a block terminator");

  // Reuse the IV increment that SCEVExpander created as long as it dominates
  // NarrowUse.
  Instruction *WideUse = nullptr;
  if (WideAddRec == WideIncExpr &&
      Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
    WideUse = WideInc;
  else {
    WideUse = cloneIVUser(DU, WideAddRec);
    if (!WideUse)
      return nullptr;
  }
  // Evaluation of WideAddRec ensured that the narrow expression could be
  // extended outside the loop without overflow. This suggests that the wide
  // use evaluates to the same expression as the extended narrow use, but
  // doesn't absolutely guarantee it. Hence the following failsafe check. In
  // rare cases where it fails, we simply throw away the newly created wide
  // use.
  if (WideAddRec != SE->getSCEV(WideUse)) {
    DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
                 << ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec
                 << "\n");
    DeadInsts.emplace_back(WideUse);
    return nullptr;
  }

  // Returning WideUse pushes it on the worklist.
  return WideUse;
}

/// Add eligible users of NarrowDef to NarrowIVUsers.
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
  const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
  bool NeverNegative =
      SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
                           SE->getConstant(NarrowSCEV->getType(), 0));
  for (User *U : NarrowDef->users()) {
    Instruction *NarrowUser = cast<Instruction>(U);

    // Handle data flow merges and bizarre phi cycles.
    if (!Widened.insert(NarrowUser).second)
      continue;

    NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef, NeverNegative);
  }
}

/// Process a single induction variable. First use the SCEVExpander to create
/// a wide induction variable that evaluates to the same recurrence as the
/// original narrow IV. Then use a worklist to forward traverse the narrow
/// IV's def-use chain. After widenIVUse has processed all interesting IV
/// users, the narrow IV will be isolated for removal by DeleteDeadPHIs.
///
/// It would be simpler to delete uses as they are processed, but we must
/// avoid invalidating SCEV expressions.
PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
  // Is this phi an induction variable?
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
  if (!AddRec)
    return nullptr;

  // Widen the induction variable expression.
  const SCEV *WideIVExpr = IsSigned ?
      SE->getSignExtendExpr(AddRec, WideType) :
      SE->getZeroExtendExpr(AddRec, WideType);

  assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
         "Expect the new IV expression to preserve its type");

  // Can the IV be extended outside the loop without overflow?
  AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return nullptr;

  // An AddRec must have loop-invariant operands. Since this AddRec is
  // materialized by a loop header phi, the expression cannot have any
  // post-loop operands, so they must dominate the loop header.
  assert(SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
         SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader())
         && "Loop header phi recurrence inputs do not dominate the loop");

  // The rewriter provides a value for the desired IV expression. This may
  // either find an existing phi or materialize a new one. Either way, we
  // expect a well-formed cyclic phi-with-increments. i.e. any operand not
  // part of the phi-SCC dominates the loop entry.
  Instruction *InsertPt = &L->getHeader()->front();
  WidePhi = cast<PHINode>(Rewriter.expandCodeFor(AddRec, WideType, InsertPt));

  // Remembering the WideIV increment generated by SCEVExpander allows
  // widenIVUse to reuse it when widening the narrow IV's increment. We don't
  // employ a general reuse mechanism because the call above is the only call
  // to SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
1451 if (BasicBlock *LatchBlock = L->getLoopLatch()) { 1452 WideInc = 1453 cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock)); 1454 WideIncExpr = SE->getSCEV(WideInc); 1455 } 1456 1457 DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n"); 1458 ++NumWidened; 1459 1460 // Traverse the def-use chain using a worklist starting at the original IV. 1461 assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" ); 1462 1463 Widened.insert(OrigPhi); 1464 pushNarrowIVUsers(OrigPhi, WidePhi); 1465 1466 while (!NarrowIVUsers.empty()) { 1467 NarrowIVDefUse DU = NarrowIVUsers.pop_back_val(); 1468 1469 // Process a def-use edge. This may replace the use, so don't hold a 1470 // use_iterator across it. 1471 Instruction *WideUse = widenIVUse(DU, Rewriter); 1472 1473 // Follow all def-use edges from the previous narrow use. 1474 if (WideUse) 1475 pushNarrowIVUsers(DU.NarrowUse, WideUse); 1476 1477 // widenIVUse may have removed the def-use edge. 1478 if (DU.NarrowDef->use_empty()) 1479 DeadInsts.emplace_back(DU.NarrowDef); 1480 } 1481 return WidePhi; 1482 } 1483 1484 //===----------------------------------------------------------------------===// 1485 // Live IV Reduction - Minimize IVs live across the loop. 1486 //===----------------------------------------------------------------------===// 1487 1488 1489 //===----------------------------------------------------------------------===// 1490 // Simplification of IV users based on SCEV evaluation. 1491 //===----------------------------------------------------------------------===// 1492 1493 namespace { 1494 class IndVarSimplifyVisitor : public IVVisitor { 1495 ScalarEvolution *SE; 1496 const TargetTransformInfo *TTI; 1497 PHINode *IVPhi; 1498 1499 public: 1500 WideIVInfo WI; 1501 1502 IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV, 1503 const TargetTransformInfo *TTI, 1504 const DominatorTree *DTree) 1505 : SE(SCEV), TTI(TTI), IVPhi(IV) { 1506 DT = DTree; 1507 WI.NarrowIV = IVPhi; 1508 if (ReduceLiveIVs) 1509 setSplitOverflowIntrinsics(); 1510 } 1511 1512 // Implement the interface used by simplifyUsersOfIV. 1513 void visitCast(CastInst *Cast) override { visitIVCast(Cast, WI, SE, TTI); } 1514 }; 1515 } 1516 1517 /// Iteratively perform simplification on a worklist of IV users. Each 1518 /// successive simplification may push more users which may themselves be 1519 /// candidates for simplification. 1520 /// 1521 /// Sign/Zero extend elimination is interleaved with IV simplification. 1522 /// 1523 void IndVarSimplify::simplifyAndExtend(Loop *L, 1524 SCEVExpander &Rewriter, 1525 LoopInfo *LI) { 1526 SmallVector<WideIVInfo, 8> WideIVs; 1527 1528 SmallVector<PHINode*, 8> LoopPhis; 1529 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) { 1530 LoopPhis.push_back(cast<PHINode>(I)); 1531 } 1532 // Each round of simplification iterates through the SimplifyIVUsers worklist 1533 // for all current phis, then determines whether any IVs can be 1534 // widened. Widening adds new phis to LoopPhis, inducing another round of 1535 // simplification on the wide IVs. 1536 while (!LoopPhis.empty()) { 1537 // Evaluate as many IV expressions as possible before widening any IVs. This 1538 // forces SCEV to set no-wrap flags before evaluating sign/zero 1539 // extension. The first time SCEV attempts to normalize sign/zero extension, 1540 // the result becomes final. 
So for the most predictable results, we delay
1541     // sign/zero extend evaluation until it is needed, and avoid running other
1542     // SCEV-based analysis prior to simplifyAndExtend.
1543     do {
1544       PHINode *CurrIV = LoopPhis.pop_back_val();
1545 
1546       // Information about sign/zero extensions of CurrIV.
1547       IndVarSimplifyVisitor Visitor(CurrIV, SE, TTI, DT);
1548 
1549       Changed |= simplifyUsersOfIV(CurrIV, SE, DT, LI, DeadInsts, &Visitor);
1550 
1551       if (Visitor.WI.WidestNativeType) {
1552         WideIVs.push_back(Visitor.WI);
1553       }
1554     } while (!LoopPhis.empty());
1555 
1556     for (; !WideIVs.empty(); WideIVs.pop_back()) {
1557       WidenIV Widener(WideIVs.back(), LI, SE, DT, DeadInsts);
1558       if (PHINode *WidePhi = Widener.createWideIV(Rewriter)) {
1559         Changed = true;
1560         LoopPhis.push_back(WidePhi);
1561       }
1562     }
1563   }
1564 }
1565 
1566 //===----------------------------------------------------------------------===//
1567 //  linearFunctionTestReplace and its kin. Rewrite the loop exit condition.
1568 //===----------------------------------------------------------------------===//
1569 
1570 /// Return true if this loop's backedge taken count expression can be safely and
1571 /// cheaply expanded into an instruction sequence that can be used by
1572 /// linearFunctionTestReplace.
1573 ///
1574 /// TODO: This fails for pointer-type loop counters with greater than one byte
1575 /// strides, consequently preventing LFTR from running. For the purpose of LFTR
1576 /// we could skip this check in the case that the LFTR loop counter (chosen by
1577 /// FindLoopCounter) is also pointer type. Instead, we could directly convert
1578 /// the loop test to an inequality test by checking the target data's alignment
1579 /// of element types (given that the initial pointer value originates from or is
1580 /// used by an ABI-constrained operation, as opposed to inttoptr/ptrtoint).
1581 /// However, we don't yet have a strong motivation for converting loop tests
1582 /// into inequality tests.
1583 static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE,
1584                                         SCEVExpander &Rewriter) {
1585   const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
1586   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
1587       BackedgeTakenCount->isZero())
1588     return false;
1589 
1590   if (!L->getExitingBlock())
1591     return false;
1592 
1593   // Can't rewrite non-branch terminators yet.
1594   if (!isa<BranchInst>(L->getExitingBlock()->getTerminator()))
1595     return false;
1596 
1597   if (Rewriter.isHighCostExpansion(BackedgeTakenCount, L))
1598     return false;
1599 
1600   return true;
1601 }
1602 
1603 /// Return the loop header phi IFF IncV adds a loop invariant value to the phi.
1604 static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
1605   Instruction *IncI = dyn_cast<Instruction>(IncV);
1606   if (!IncI)
1607     return nullptr;
1608 
1609   switch (IncI->getOpcode()) {
1610   case Instruction::Add:
1611   case Instruction::Sub:
1612     break;
1613   case Instruction::GetElementPtr:
1614     // An IV counter must preserve its type.
1615     if (IncI->getNumOperands() == 2)
1616       break;
1617   default:
1618     return nullptr;
1619   }
1620 
1621   PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
1622   if (Phi && Phi->getParent() == L->getHeader()) {
1623     if (isLoopInvariant(IncI->getOperand(1), L, DT))
1624       return Phi;
1625     return nullptr;
1626   }
1627   if (IncI->getOpcode() == Instruction::GetElementPtr)
1628     return nullptr;
1629 
1630   // Allow add/sub to be commuted.
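  // For example (an illustrative sketch), either increment form below yields a
  // valid counter for the header phi %iv, provided %step is loop invariant:
  //   %iv.next = add i32 %iv, %step   ; phi is operand 0, handled above
  //   %iv.next = add i32 %step, %iv   ; phi is operand 1, handled here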
1631 Phi = dyn_cast<PHINode>(IncI->getOperand(1)); 1632 if (Phi && Phi->getParent() == L->getHeader()) { 1633 if (isLoopInvariant(IncI->getOperand(0), L, DT)) 1634 return Phi; 1635 } 1636 return nullptr; 1637 } 1638 1639 /// Return the compare guarding the loop latch, or NULL for unrecognized tests. 1640 static ICmpInst *getLoopTest(Loop *L) { 1641 assert(L->getExitingBlock() && "expected loop exit"); 1642 1643 BasicBlock *LatchBlock = L->getLoopLatch(); 1644 // Don't bother with LFTR if the loop is not properly simplified. 1645 if (!LatchBlock) 1646 return nullptr; 1647 1648 BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator()); 1649 assert(BI && "expected exit branch"); 1650 1651 return dyn_cast<ICmpInst>(BI->getCondition()); 1652 } 1653 1654 /// linearFunctionTestReplace policy. Return true unless we can show that the 1655 /// current exit test is already sufficiently canonical. 1656 static bool needsLFTR(Loop *L, DominatorTree *DT) { 1657 // Do LFTR to simplify the exit condition to an ICMP. 1658 ICmpInst *Cond = getLoopTest(L); 1659 if (!Cond) 1660 return true; 1661 1662 // Do LFTR to simplify the exit ICMP to EQ/NE 1663 ICmpInst::Predicate Pred = Cond->getPredicate(); 1664 if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ) 1665 return true; 1666 1667 // Look for a loop invariant RHS 1668 Value *LHS = Cond->getOperand(0); 1669 Value *RHS = Cond->getOperand(1); 1670 if (!isLoopInvariant(RHS, L, DT)) { 1671 if (!isLoopInvariant(LHS, L, DT)) 1672 return true; 1673 std::swap(LHS, RHS); 1674 } 1675 // Look for a simple IV counter LHS 1676 PHINode *Phi = dyn_cast<PHINode>(LHS); 1677 if (!Phi) 1678 Phi = getLoopPhiForCounter(LHS, L, DT); 1679 1680 if (!Phi) 1681 return true; 1682 1683 // Do LFTR if PHI node is defined in the loop, but is *not* a counter. 1684 int Idx = Phi->getBasicBlockIndex(L->getLoopLatch()); 1685 if (Idx < 0) 1686 return true; 1687 1688 // Do LFTR if the exit condition's IV is *not* a simple counter. 1689 Value *IncV = Phi->getIncomingValue(Idx); 1690 return Phi != getLoopPhiForCounter(IncV, L, DT); 1691 } 1692 1693 /// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils 1694 /// down to checking that all operands are constant and listing instructions 1695 /// that may hide undef. 1696 static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited, 1697 unsigned Depth) { 1698 if (isa<Constant>(V)) 1699 return !isa<UndefValue>(V); 1700 1701 if (Depth >= 6) 1702 return false; 1703 1704 // Conservatively handle non-constant non-instructions. For example, Arguments 1705 // may be undef. 1706 Instruction *I = dyn_cast<Instruction>(V); 1707 if (!I) 1708 return false; 1709 1710 // Load and return values may be undef. 1711 if(I->mayReadFromMemory() || isa<CallInst>(I) || isa<InvokeInst>(I)) 1712 return false; 1713 1714 // Optimistically handle other instructions. 1715 for (Value *Op : I->operands()) { 1716 if (!Visited.insert(Op).second) 1717 continue; 1718 if (!hasConcreteDefImpl(Op, Visited, Depth+1)) 1719 return false; 1720 } 1721 return true; 1722 } 1723 1724 /// Return true if the given value is concrete. We must prove that undef can 1725 /// never reach it. 1726 /// 1727 /// TODO: If we decide that this is a good approach to checking for undef, we 1728 /// may factor it into a common location. 
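/// For example (illustrative): a value computed only from constants, such as
/// '%x = shl i32 3, 2', is concrete, while anything that reads memory, is a
/// call result, or is a function argument is conservatively treated as
/// possibly undef.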
1729 static bool hasConcreteDef(Value *V) {
1730   SmallPtrSet<Value*, 8> Visited;
1731   Visited.insert(V);
1732   return hasConcreteDefImpl(V, Visited, 0);
1733 }
1734 
1735 /// Return true if this IV has no uses other than the (soon to be rewritten)
1736 /// loop exit test; that is, the IV is almost dead.
1737 static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
1738   int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
1739   Value *IncV = Phi->getIncomingValue(LatchIdx);
1740 
1741   for (User *U : Phi->users())
1742     if (U != Cond && U != IncV) return false;
1743 
1744   for (User *U : IncV->users())
1745     if (U != Cond && U != Phi) return false;
1746   return true;
1747 }
1748 
1749 /// Find an affine IV in canonical form.
1750 ///
1751 /// BECount may be an i8* pointer type. The pointer difference is already a
1752 /// valid count without scaling the address stride, so it remains a pointer
1753 /// expression as far as SCEV is concerned.
1754 ///
1755 /// Currently only valid for LFTR. See the comments on hasConcreteDef above.
1756 ///
1757 /// FIXME: Accept -1 stride and set IVLimit = IVInit - BECount
1758 ///
1759 /// FIXME: Accept non-unit stride as long as SCEV can reduce BECount * Stride.
1760 /// This is difficult in general for SCEV because of potential overflow. But we
1761 /// could at least handle constant BECounts.
1762 static PHINode *FindLoopCounter(Loop *L, const SCEV *BECount,
1763                                 ScalarEvolution *SE, DominatorTree *DT) {
1764   uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
1765 
1766   Value *Cond =
1767     cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();
1768 
1769   // Loop over all of the PHI nodes, looking for a simple counter.
1770   PHINode *BestPhi = nullptr;
1771   const SCEV *BestInit = nullptr;
1772   BasicBlock *LatchBlock = L->getLoopLatch();
1773   assert(LatchBlock && "needsLFTR should guarantee a loop latch");
1774   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
1775 
1776   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
1777     PHINode *Phi = cast<PHINode>(I);
1778     if (!SE->isSCEVable(Phi->getType()))
1779       continue;
1780 
1781     // Avoid comparing an integer IV against a pointer Limit.
1782     if (BECount->getType()->isPointerTy() && !Phi->getType()->isPointerTy())
1783       continue;
1784 
1785     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Phi));
1786     if (!AR || AR->getLoop() != L || !AR->isAffine())
1787       continue;
1788 
1789     // AR may be a pointer type, while BECount is an integer type.
1790     // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
1791     // AR may not be a narrower type, or we may never exit.
1792     uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
1793     if (PhiWidth < BCWidth || !DL.isLegalInteger(PhiWidth))
1794       continue;
1795 
1796     const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
1797     if (!Step || !Step->isOne())
1798       continue;
1799 
1800     int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
1801     Value *IncV = Phi->getIncomingValue(LatchIdx);
1802     if (getLoopPhiForCounter(IncV, L, DT) != Phi)
1803       continue;
1804 
1805     // Avoid reusing a potentially undef value to compute other values that may
1806     // have originally had a concrete definition.
1807     if (!hasConcreteDef(Phi)) {
1808       // We explicitly allow unknown phis as long as they are already used by
1809       // the loop test. In this case we assume that performing LFTR could not
1810       // increase the number of undef users.
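      // For example (illustrative): a counter whose start value comes from an
      // uninitialized variable in the source may be undef, but if the existing
      // exit test already compares against that same phi, rewriting the test
      // introduces no new users of the possibly-undef value.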
1811 if (ICmpInst *Cond = getLoopTest(L)) { 1812 if (Phi != getLoopPhiForCounter(Cond->getOperand(0), L, DT) 1813 && Phi != getLoopPhiForCounter(Cond->getOperand(1), L, DT)) { 1814 continue; 1815 } 1816 } 1817 } 1818 const SCEV *Init = AR->getStart(); 1819 1820 if (BestPhi && !AlmostDeadIV(BestPhi, LatchBlock, Cond)) { 1821 // Don't force a live loop counter if another IV can be used. 1822 if (AlmostDeadIV(Phi, LatchBlock, Cond)) 1823 continue; 1824 1825 // Prefer to count-from-zero. This is a more "canonical" counter form. It 1826 // also prefers integer to pointer IVs. 1827 if (BestInit->isZero() != Init->isZero()) { 1828 if (BestInit->isZero()) 1829 continue; 1830 } 1831 // If two IVs both count from zero or both count from nonzero then the 1832 // narrower is likely a dead phi that has been widened. Use the wider phi 1833 // to allow the other to be eliminated. 1834 else if (PhiWidth <= SE->getTypeSizeInBits(BestPhi->getType())) 1835 continue; 1836 } 1837 BestPhi = Phi; 1838 BestInit = Init; 1839 } 1840 return BestPhi; 1841 } 1842 1843 /// Help linearFunctionTestReplace by generating a value that holds the RHS of 1844 /// the new loop test. 1845 static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L, 1846 SCEVExpander &Rewriter, ScalarEvolution *SE) { 1847 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar)); 1848 assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter"); 1849 const SCEV *IVInit = AR->getStart(); 1850 1851 // IVInit may be a pointer while IVCount is an integer when FindLoopCounter 1852 // finds a valid pointer IV. Sign extend BECount in order to materialize a 1853 // GEP. Avoid running SCEVExpander on a new pointer value, instead reusing 1854 // the existing GEPs whenever possible. 1855 if (IndVar->getType()->isPointerTy() 1856 && !IVCount->getType()->isPointerTy()) { 1857 1858 // IVOffset will be the new GEP offset that is interpreted by GEP as a 1859 // signed value. IVCount on the other hand represents the loop trip count, 1860 // which is an unsigned value. FindLoopCounter only allows induction 1861 // variables that have a positive unit stride of one. This means we don't 1862 // have to handle the case of negative offsets (yet) and just need to zero 1863 // extend IVCount. 1864 Type *OfsTy = SE->getEffectiveSCEVType(IVInit->getType()); 1865 const SCEV *IVOffset = SE->getTruncateOrZeroExtend(IVCount, OfsTy); 1866 1867 // Expand the code for the iteration count. 1868 assert(SE->isLoopInvariant(IVOffset, L) && 1869 "Computed iteration count is not loop invariant!"); 1870 BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator()); 1871 Value *GEPOffset = Rewriter.expandCodeFor(IVOffset, OfsTy, BI); 1872 1873 Value *GEPBase = IndVar->getIncomingValueForBlock(L->getLoopPreheader()); 1874 assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter"); 1875 // We could handle pointer IVs other than i8*, but we need to compensate for 1876 // gep index scaling. See canExpandBackedgeTakenCount comments. 1877 assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()), 1878 cast<PointerType>(GEPBase->getType())->getElementType())->isOne() 1879 && "unit stride pointer IV must be i8*"); 1880 1881 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 1882 return Builder.CreateGEP(nullptr, GEPBase, GEPOffset, "lftr.limit"); 1883 } 1884 else { 1885 // In any other case, convert both IVInit and IVCount to integers before 1886 // comparing. 
This may result in SCEV expansion of pointers, but in practice
1887     // SCEV will fold the pointer arithmetic away as follows:
1888     // BECount = (IVEnd - IVInit - 1) => IVLimit = IVInit (postinc).
1889     //
1890     // Valid Cases: (1) both are integers, which is the most common; (2) both
1891     // may be pointers for simple memset-style loops.
1892     //
1893     // IVInit integer and IVCount pointer would only occur if a canonical IV
1894     // were generated on top of case #2, which is not expected.
1895 
1896     const SCEV *IVLimit = nullptr;
1897     // For unit stride, IVLimit = Start + IVCount with 2's complement overflow.
1898     // For non-zero Start, compute IVLimit here.
1899     if (AR->getStart()->isZero())
1900       IVLimit = IVCount;
1901     else {
1902       assert(AR->getStepRecurrence(*SE)->isOne() && "only handles unit stride");
1903       const SCEV *IVInit = AR->getStart();
1904 
1905       // For integer IVs, truncate the IV before computing IVInit + IVCount.
1906       if (SE->getTypeSizeInBits(IVInit->getType())
1907           > SE->getTypeSizeInBits(IVCount->getType()))
1908         IVInit = SE->getTruncateExpr(IVInit, IVCount->getType());
1909 
1910       IVLimit = SE->getAddExpr(IVInit, IVCount);
1911     }
1912     // Expand the code for the iteration count.
1913     BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
1914     IRBuilder<> Builder(BI);
1915     assert(SE->isLoopInvariant(IVLimit, L) &&
1916            "Computed iteration count is not loop invariant!");
1917     // Ensure that we generate the same type as IndVar, or a smaller integer
1918     // type. In the presence of null pointer values, we have an integer type
1919     // SCEV expression (IVInit) for a pointer type IV value (IndVar).
1920     Type *LimitTy = IVCount->getType()->isPointerTy() ?
1921       IndVar->getType() : IVCount->getType();
1922     return Rewriter.expandCodeFor(IVLimit, LimitTy, BI);
1923   }
1924 }
1925 
1926 /// This method rewrites the exit condition of the loop to be a canonical !=
1927 /// comparison against the incremented loop induction variable. This pass is
1928 /// able to rewrite the exit tests of any loop where the SCEV analysis can
1929 /// determine a loop-invariant trip count of the loop, which is actually a much
1930 /// broader range than just linear tests.
1931 Value *IndVarSimplify::
1932 linearFunctionTestReplace(Loop *L,
1933                           const SCEV *BackedgeTakenCount,
1934                           PHINode *IndVar,
1935                           SCEVExpander &Rewriter) {
1936   assert(canExpandBackedgeTakenCount(L, SE, Rewriter) && "precondition");
1937 
1938   // Initialize CmpIndVar and IVCount to their preincremented values.
1939   Value *CmpIndVar = IndVar;
1940   const SCEV *IVCount = BackedgeTakenCount;
1941 
1942   // If the exiting block is the same as the backedge block, we prefer to
1943   // compare against the post-incremented value, otherwise we must compare
1944   // against the preincremented value.
1945   if (L->getExitingBlock() == L->getLoopLatch()) {
1946     // Add one to the "backedge-taken" count to get the trip count.
1947     // This addition may overflow, which is valid as long as the comparison is
1948     // truncated to BackedgeTakenCount->getType().
1949     IVCount = SE->getAddExpr(BackedgeTakenCount,
1950                              SE->getOne(BackedgeTakenCount->getType()));
1951     // The BackedgeTaken expression contains the number of times that the
1952     // backedge branches to the loop header. This is one less than the
1953     // number of times the loop executes, so use the incremented indvar.
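    // For example (illustrative): in 'for (i = 0; i != 10; ++i)' the backedge
    // is taken 9 times while the loop executes 10 times, so when exiting from
    // the latch we compare the incremented IV against IVCount = 9 + 1 = 10.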
1954     CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
1955   }
1956 
1957   Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
1958   assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
1959          && "genLoopLimit missed a cast");
1960 
1961   // Insert a new icmp_ne or icmp_eq instruction before the branch.
1962   BranchInst *BI = cast<BranchInst>(L->getExitingBlock()->getTerminator());
1963   ICmpInst::Predicate P;
1964   if (L->contains(BI->getSuccessor(0)))
1965     P = ICmpInst::ICMP_NE;
1966   else
1967     P = ICmpInst::ICMP_EQ;
1968 
1969   DEBUG(dbgs() << "INDVARS: Rewriting loop exit condition to:\n"
1970         << "      LHS:" << *CmpIndVar << '\n'
1971         << "       op:\t"
1972         << (P == ICmpInst::ICMP_NE ? "!=" : "==") << "\n"
1973         << "      RHS:\t" << *ExitCnt << "\n"
1974         << "  IVCount:\t" << *IVCount << "\n");
1975 
1976   IRBuilder<> Builder(BI);
1977 
1978   // LFTR can ignore IV overflow and truncate to the width of
1979   // BECount. This avoids materializing the add(zext(add)) expression.
1980   unsigned CmpIndVarSize = SE->getTypeSizeInBits(CmpIndVar->getType());
1981   unsigned ExitCntSize = SE->getTypeSizeInBits(ExitCnt->getType());
1982   if (CmpIndVarSize > ExitCntSize) {
1983     const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
1984     const SCEV *ARStart = AR->getStart();
1985     const SCEV *ARStep = AR->getStepRecurrence(*SE);
1986     // For constant IVCount, avoid truncation.
1987     if (isa<SCEVConstant>(ARStart) && isa<SCEVConstant>(IVCount)) {
1988       const APInt &Start = cast<SCEVConstant>(ARStart)->getAPInt();
1989       APInt Count = cast<SCEVConstant>(IVCount)->getAPInt();
1990       // Note that the post-inc value of BackedgeTakenCount may have overflowed
1991       // above such that IVCount is now zero.
1992       if (IVCount != BackedgeTakenCount && Count == 0) {
1993         Count = APInt::getMaxValue(Count.getBitWidth()).zext(CmpIndVarSize);
1994         ++Count;
1995       }
1996       else
1997         Count = Count.zext(CmpIndVarSize);
1998       APInt NewLimit;
1999       if (cast<SCEVConstant>(ARStep)->getValue()->isNegative())
2000         NewLimit = Start - Count;
2001       else
2002         NewLimit = Start + Count;
2003       ExitCnt = ConstantInt::get(CmpIndVar->getType(), NewLimit);
2004 
2005       DEBUG(dbgs() << "  Widen RHS:\t" << *ExitCnt << "\n");
2006     } else {
2007       CmpIndVar = Builder.CreateTrunc(CmpIndVar, ExitCnt->getType(),
2008                                       "lftr.wideiv");
2009     }
2010   }
2011   Value *Cond = Builder.CreateICmp(P, CmpIndVar, ExitCnt, "exitcond");
2012   Value *OrigCond = BI->getCondition();
2013   // It's tempting to use replaceAllUsesWith here to fully replace the old
2014   // comparison, but that's not immediately safe, since users of the old
2015   // comparison may not be dominated by the new comparison. Instead, just
2016   // update the branch to use the new comparison; in the common case this
2017   // will make the old comparison dead.
2018   BI->setCondition(Cond);
2019   DeadInsts.push_back(OrigCond);
2020 
2021   ++NumLFTR;
2022   Changed = true;
2023   return Cond;
2024 }
2025 
2026 //===----------------------------------------------------------------------===//
2027 //  sinkUnusedInvariants. A late subpass to clean up loop preheaders.
2028 //===----------------------------------------------------------------------===//
2029 
2030 /// If there's a single exit block, sink any loop-invariant values that
2031 /// were defined in the preheader but not used inside the loop into the
2032 /// exit block to reduce register pressure in the loop.
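/// For example (a sketch, IR names hypothetical):
///   preheader:
///     %inv = mul i32 %n, 4        ; loop invariant, unused inside the loop
///   ...
///   exit:
///     call void @use(i32 %inv)
/// Moving %inv into the exit block keeps it out of the loop's live ranges.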
2033 void IndVarSimplify::sinkUnusedInvariants(Loop *L) {
2034   BasicBlock *ExitBlock = L->getExitBlock();
2035   if (!ExitBlock) return;
2036 
2037   BasicBlock *Preheader = L->getLoopPreheader();
2038   if (!Preheader) return;
2039 
2040   Instruction *InsertPt = &*ExitBlock->getFirstInsertionPt();
2041   BasicBlock::iterator I(Preheader->getTerminator());
2042   while (I != Preheader->begin()) {
2043     --I;
2044     // New instructions were inserted at the end of the preheader.
2045     if (isa<PHINode>(I))
2046       break;
2047 
2048     // Don't move instructions which might have side effects, since the side
2049     // effects need to complete before any instruction inside the loop
2050     // executes. Also don't move instructions which might read memory, since
2051     // the loop may modify memory. Note that it's okay if the instruction
2052     // might have undefined behavior: LoopSimplify guarantees that the
2053     // preheader dominates the exit block.
2054     if (I->mayHaveSideEffects() || I->mayReadFromMemory())
2055       continue;
2056 
2057     // Skip debug info intrinsics.
2058     if (isa<DbgInfoIntrinsic>(I))
2059       continue;
2060 
2061     // Skip eh pad instructions.
2062     if (I->isEHPad())
2063       continue;
2064 
2065     // Don't sink allocas: we never want to sink static allocas out of the
2066     // entry block, and correctly sinking dynamic allocas requires
2067     // checks for stacksave/stackrestore intrinsics.
2068     // FIXME: Refactor this check somehow?
2069     if (isa<AllocaInst>(I))
2070       continue;
2071 
2072     // Determine if there is a use in or before the loop (direct or
2073     // otherwise).
2074     bool UsedInLoop = false;
2075     for (Use &U : I->uses()) {
2076       Instruction *User = cast<Instruction>(U.getUser());
2077       BasicBlock *UseBB = User->getParent();
2078       if (PHINode *P = dyn_cast<PHINode>(User)) {
2079         unsigned i =
2080           PHINode::getIncomingValueNumForOperand(U.getOperandNo());
2081         UseBB = P->getIncomingBlock(i);
2082       }
2083       if (UseBB == Preheader || L->contains(UseBB)) {
2084         UsedInLoop = true;
2085         break;
2086       }
2087     }
2088 
2089     // If there is, the def must remain in the preheader.
2090     if (UsedInLoop)
2091       continue;
2092 
2093     // Otherwise, sink it to the exit block.
2094     Instruction *ToMove = &*I;
2095     bool Done = false;
2096 
2097     if (I != Preheader->begin()) {
2098       // Skip debug info intrinsics.
2099       do {
2100         --I;
2101       } while (isa<DbgInfoIntrinsic>(I) && I != Preheader->begin());
2102 
2103       if (isa<DbgInfoIntrinsic>(I) && I == Preheader->begin())
2104         Done = true;
2105     } else {
2106       Done = true;
2107     }
2108 
2109     ToMove->moveBefore(InsertPt);
2110     if (Done) break;
2111     InsertPt = ToMove;
2112   }
2113 }
2114 
2115 //===----------------------------------------------------------------------===//
2116 //  IndVarSimplify driver. Manage several subpasses of IV simplification.
2117 //===----------------------------------------------------------------------===//
2118 
2119 bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
2120   if (skipOptnoneFunction(L))
2121     return false;
2122 
2123   // If LoopSimplify form is not available, stay out of trouble. Some notes:
2124   //  - LSR currently only supports LoopSimplify-form loops. Indvars'
2125   //    canonicalization can be a pessimization without LSR to "clean up"
2126   //    afterwards.
2127   //  - We depend on having a preheader; in particular,
2128   //    Loop::getCanonicalInductionVariable only supports loops with preheaders,
2129   //    and we're in trouble if we can't find the induction variable even when
2130   //    we've manually inserted one.
2131 if (!L->isLoopSimplifyForm()) 2132 return false; 2133 2134 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2135 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2136 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2137 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2138 TLI = TLIP ? &TLIP->getTLI() : nullptr; 2139 auto *TTIP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>(); 2140 TTI = TTIP ? &TTIP->getTTI(*L->getHeader()->getParent()) : nullptr; 2141 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 2142 2143 DeadInsts.clear(); 2144 Changed = false; 2145 2146 // If there are any floating-point recurrences, attempt to 2147 // transform them to use integer recurrences. 2148 rewriteNonIntegerIVs(L); 2149 2150 const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L); 2151 2152 // Create a rewriter object which we'll use to transform the code with. 2153 SCEVExpander Rewriter(*SE, DL, "indvars"); 2154 #ifndef NDEBUG 2155 Rewriter.setDebugType(DEBUG_TYPE); 2156 #endif 2157 2158 // Eliminate redundant IV users. 2159 // 2160 // Simplification works best when run before other consumers of SCEV. We 2161 // attempt to avoid evaluating SCEVs for sign/zero extend operations until 2162 // other expressions involving loop IVs have been evaluated. This helps SCEV 2163 // set no-wrap flags before normalizing sign/zero extension. 2164 Rewriter.disableCanonicalMode(); 2165 simplifyAndExtend(L, Rewriter, LI); 2166 2167 // Check to see if this loop has a computable loop-invariant execution count. 2168 // If so, this means that we can compute the final value of any expressions 2169 // that are recurrent in the loop, and substitute the exit values from the 2170 // loop into any instructions outside of the loop that use the final values of 2171 // the current expressions. 2172 // 2173 if (ReplaceExitValue != NeverRepl && 2174 !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 2175 rewriteLoopExitValues(L, Rewriter); 2176 2177 // Eliminate redundant IV cycles. 2178 NumElimIV += Rewriter.replaceCongruentIVs(L, DT, DeadInsts); 2179 2180 // If we have a trip count expression, rewrite the loop's exit condition 2181 // using it. We can currently only handle loops with a single exit. 2182 if (canExpandBackedgeTakenCount(L, SE, Rewriter) && needsLFTR(L, DT)) { 2183 PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT); 2184 if (IndVar) { 2185 // Check preconditions for proper SCEVExpander operation. SCEV does not 2186 // express SCEVExpander's dependencies, such as LoopSimplify. Instead any 2187 // pass that uses the SCEVExpander must do it. This does not work well for 2188 // loop passes because SCEVExpander makes assumptions about all loops, 2189 // while LoopPassManager only forces the current loop to be simplified. 2190 // 2191 // FIXME: SCEV expansion has no way to bail out, so the caller must 2192 // explicitly check any assumptions made by SCEV. Brittle. 2193 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(BackedgeTakenCount); 2194 if (!AR || AR->getLoop()->getLoopPreheader()) 2195 (void)linearFunctionTestReplace(L, BackedgeTakenCount, IndVar, 2196 Rewriter); 2197 } 2198 } 2199 // Clear the rewriter cache, because values that are in the rewriter's cache 2200 // can be deleted in the loop below, causing the AssertingVH in the cache to 2201 // trigger. 2202 Rewriter.clear(); 2203 2204 // Now that we're done iterating through lists, clean up any instructions 2205 // which are now dead. 
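  // DeadInsts holds WeakVHs, so any entry already deleted transitively by an
  // earlier RecursivelyDeleteTriviallyDeadInstructions call has been nulled
  // out; the dyn_cast_or_null below skips those entries.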
2206   while (!DeadInsts.empty())
2207     if (Instruction *Inst =
2208         dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()))
2209       RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
2210 
2211   // The Rewriter may not be used from this point on.
2212 
2213   // Loop-invariant instructions in the preheader that aren't used in the
2214   // loop may be sunk below the loop to reduce register pressure.
2215   sinkUnusedInvariants(L);
2216 
2217   // rewriteFirstIterationLoopExitValues does not rely on the computation of
2218   // the trip count and therefore can further simplify exit values in addition
2219   // to rewriteLoopExitValues.
2220   rewriteFirstIterationLoopExitValues(L);
2221 
2222   // Clean up dead instructions.
2223   Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
2224 
2225   // Check a post-condition.
2226   assert(L->isRecursivelyLCSSAForm(*DT) && "Indvars did not preserve LCSSA!");
2227 
2228   // Verify that LFTR and any other changes have not interfered with SCEV's
2229   // ability to compute the trip count.
2230 #ifndef NDEBUG
2231   if (VerifyIndvars && !isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
2232     SE->forgetLoop(L);
2233     const SCEV *NewBECount = SE->getBackedgeTakenCount(L);
2234     if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) <
2235         SE->getTypeSizeInBits(NewBECount->getType()))
2236       NewBECount = SE->getTruncateOrNoop(NewBECount,
2237                                          BackedgeTakenCount->getType());
2238     else
2239       BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount,
2240                                                  NewBECount->getType());
2241     assert(BackedgeTakenCount == NewBECount && "indvars must preserve SCEV");
2242   }
2243 #endif
2244 
2245   return Changed;
2246 }
2247 