1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains the implementation of the scalar evolution analysis 11 // engine, which is used primarily to analyze expressions involving induction 12 // variables in loops. 13 // 14 // There are several aspects to this library. First is the representation of 15 // scalar expressions, which are represented as subclasses of the SCEV class. 16 // These classes are used to represent certain types of subexpressions that we 17 // can handle. We only create one SCEV of a particular shape, so 18 // pointer-comparisons for equality are legal. 19 // 20 // One important aspect of the SCEV objects is that they are never cyclic, even 21 // if there is a cycle in the dataflow for an expression (ie, a PHI node). If 22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial 23 // recurrence) then we represent it directly as a recurrence node, otherwise we 24 // represent it as a SCEVUnknown node. 25 // 26 // In addition to being able to represent expressions of various types, we also 27 // have folders that are used to build the *canonical* representation for a 28 // particular expression. These folders are capable of using a variety of 29 // rewrite rules to simplify the expressions. 30 // 31 // Once the folders are defined, we can implement the more interesting 32 // higher-level code, such as the code that recognizes PHI nodes of various 33 // types, computes the execution count of a loop, etc. 34 // 35 // TODO: We should use these routines and value representations to implement 36 // dependence analysis! 37 // 38 //===----------------------------------------------------------------------===// 39 // 40 // There are several good references for the techniques used in this analysis. 41 // 42 // Chains of recurrences -- a method to expedite the evaluation 43 // of closed-form functions 44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima 45 // 46 // On computational properties of chains of recurrences 47 // Eugene V. Zima 48 // 49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization 50 // Robert A. van Engelen 51 // 52 // Efficient Symbolic Analysis for Optimizing Compilers 53 // Robert A. 
van Engelen 54 // 55 // Using the chains of recurrences algebra for data dependence testing and 56 // induction variable substitution 57 // MS Thesis, Johnie Birch 58 // 59 //===----------------------------------------------------------------------===// 60 61 #define DEBUG_TYPE "scalar-evolution" 62 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 63 #include "llvm/Constants.h" 64 #include "llvm/DerivedTypes.h" 65 #include "llvm/GlobalVariable.h" 66 #include "llvm/GlobalAlias.h" 67 #include "llvm/Instructions.h" 68 #include "llvm/LLVMContext.h" 69 #include "llvm/Operator.h" 70 #include "llvm/Analysis/ConstantFolding.h" 71 #include "llvm/Analysis/Dominators.h" 72 #include "llvm/Analysis/LoopInfo.h" 73 #include "llvm/Analysis/ValueTracking.h" 74 #include "llvm/Assembly/Writer.h" 75 #include "llvm/Target/TargetData.h" 76 #include "llvm/Support/CommandLine.h" 77 #include "llvm/Support/ConstantRange.h" 78 #include "llvm/Support/Debug.h" 79 #include "llvm/Support/ErrorHandling.h" 80 #include "llvm/Support/GetElementPtrTypeIterator.h" 81 #include "llvm/Support/InstIterator.h" 82 #include "llvm/Support/MathExtras.h" 83 #include "llvm/Support/raw_ostream.h" 84 #include "llvm/ADT/Statistic.h" 85 #include "llvm/ADT/STLExtras.h" 86 #include "llvm/ADT/SmallPtrSet.h" 87 #include <algorithm> 88 using namespace llvm; 89 90 STATISTIC(NumArrayLenItCounts, 91 "Number of trip counts computed with array length"); 92 STATISTIC(NumTripCountsComputed, 93 "Number of loops with predictable loop counts"); 94 STATISTIC(NumTripCountsNotComputed, 95 "Number of loops without predictable loop counts"); 96 STATISTIC(NumBruteForceTripCountsComputed, 97 "Number of loops with trip counts computed by force"); 98 99 static cl::opt<unsigned> 100 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, 101 cl::desc("Maximum number of iterations SCEV will " 102 "symbolically execute a constant " 103 "derived loop"), 104 cl::init(100)); 105 106 INITIALIZE_PASS(ScalarEvolution, "scalar-evolution", 107 "Scalar Evolution Analysis", false, true) 108 char ScalarEvolution::ID = 0; 109 110 //===----------------------------------------------------------------------===// 111 // SCEV class definitions 112 //===----------------------------------------------------------------------===// 113 114 //===----------------------------------------------------------------------===// 115 // Implementation of the SCEV class. 
116 // 117 118 SCEV::~SCEV() {} 119 120 void SCEV::dump() const { 121 print(dbgs()); 122 dbgs() << '\n'; 123 } 124 125 bool SCEV::isZero() const { 126 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 127 return SC->getValue()->isZero(); 128 return false; 129 } 130 131 bool SCEV::isOne() const { 132 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 133 return SC->getValue()->isOne(); 134 return false; 135 } 136 137 bool SCEV::isAllOnesValue() const { 138 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this)) 139 return SC->getValue()->isAllOnesValue(); 140 return false; 141 } 142 143 SCEVCouldNotCompute::SCEVCouldNotCompute() : 144 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {} 145 146 bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const { 147 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 148 return false; 149 } 150 151 const Type *SCEVCouldNotCompute::getType() const { 152 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 153 return 0; 154 } 155 156 bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const { 157 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 158 return false; 159 } 160 161 bool SCEVCouldNotCompute::hasOperand(const SCEV *) const { 162 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!"); 163 return false; 164 } 165 166 void SCEVCouldNotCompute::print(raw_ostream &OS) const { 167 OS << "***COULDNOTCOMPUTE***"; 168 } 169 170 bool SCEVCouldNotCompute::classof(const SCEV *S) { 171 return S->getSCEVType() == scCouldNotCompute; 172 } 173 174 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) { 175 FoldingSetNodeID ID; 176 ID.AddInteger(scConstant); 177 ID.AddPointer(V); 178 void *IP = 0; 179 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 180 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V); 181 UniqueSCEVs.InsertNode(S, IP); 182 return S; 183 } 184 185 const SCEV *ScalarEvolution::getConstant(const APInt& Val) { 186 return getConstant(ConstantInt::get(getContext(), Val)); 187 } 188 189 const SCEV * 190 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) { 191 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 192 return getConstant(ConstantInt::get(ITy, V, isSigned)); 193 } 194 195 const Type *SCEVConstant::getType() const { return V->getType(); } 196 197 void SCEVConstant::print(raw_ostream &OS) const { 198 WriteAsOperand(OS, V, false); 199 } 200 201 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, 202 unsigned SCEVTy, const SCEV *op, const Type *ty) 203 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {} 204 205 bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { 206 return Op->dominates(BB, DT); 207 } 208 209 bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { 210 return Op->properlyDominates(BB, DT); 211 } 212 213 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, 214 const SCEV *op, const Type *ty) 215 : SCEVCastExpr(ID, scTruncate, op, ty) { 216 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 217 (Ty->isIntegerTy() || Ty->isPointerTy()) && 218 "Cannot truncate non-integer value!"); 219 } 220 221 void SCEVTruncateExpr::print(raw_ostream &OS) const { 222 OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")"; 223 } 224 225 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, 226 const SCEV *op, const Type *ty) 227 : SCEVCastExpr(ID, scZeroExtend, op, ty) { 
228 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 229 (Ty->isIntegerTy() || Ty->isPointerTy()) && 230 "Cannot zero extend non-integer value!"); 231 } 232 233 void SCEVZeroExtendExpr::print(raw_ostream &OS) const { 234 OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")"; 235 } 236 237 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, 238 const SCEV *op, const Type *ty) 239 : SCEVCastExpr(ID, scSignExtend, op, ty) { 240 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) && 241 (Ty->isIntegerTy() || Ty->isPointerTy()) && 242 "Cannot sign extend non-integer value!"); 243 } 244 245 void SCEVSignExtendExpr::print(raw_ostream &OS) const { 246 OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")"; 247 } 248 249 void SCEVCommutativeExpr::print(raw_ostream &OS) const { 250 const char *OpStr = getOperationStr(); 251 OS << "("; 252 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) { 253 OS << **I; 254 if (llvm::next(I) != E) 255 OS << OpStr; 256 } 257 OS << ")"; 258 } 259 260 bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { 261 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) 262 if (!(*I)->dominates(BB, DT)) 263 return false; 264 return true; 265 } 266 267 bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { 268 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) 269 if (!(*I)->properlyDominates(BB, DT)) 270 return false; 271 return true; 272 } 273 274 bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const { 275 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) 276 if (!(*I)->isLoopInvariant(L)) 277 return false; 278 return true; 279 } 280 281 // hasComputableLoopEvolution - N-ary expressions have computable loop 282 // evolutions iff they have at least one operand that varies with the loop, 283 // but that all varying operands are computable. 284 bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const { 285 bool HasVarying = false; 286 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) { 287 const SCEV *S = *I; 288 if (!S->isLoopInvariant(L)) { 289 if (S->hasComputableLoopEvolution(L)) 290 HasVarying = true; 291 else 292 return false; 293 } 294 } 295 return HasVarying; 296 } 297 298 bool SCEVNAryExpr::hasOperand(const SCEV *O) const { 299 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) { 300 const SCEV *S = *I; 301 if (O == S || S->hasOperand(O)) 302 return true; 303 } 304 return false; 305 } 306 307 bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { 308 return LHS->dominates(BB, DT) && RHS->dominates(BB, DT); 309 } 310 311 bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { 312 return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT); 313 } 314 315 void SCEVUDivExpr::print(raw_ostream &OS) const { 316 OS << "(" << *LHS << " /u " << *RHS << ")"; 317 } 318 319 const Type *SCEVUDivExpr::getType() const { 320 // In most cases the types of LHS and RHS will be the same, but in some 321 // crazy cases one or the other may be a pointer. ScalarEvolution doesn't 322 // depend on the type for correctness, but handling types carefully can 323 // avoid extra casts in the SCEVExpander. The LHS is more likely to be 324 // a pointer type than the RHS, so use the RHS' type here. 
325 return RHS->getType(); 326 } 327 328 bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const { 329 // Add recurrences are never invariant in the function-body (null loop). 330 if (!QueryLoop) 331 return false; 332 333 // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L. 334 if (QueryLoop->contains(L)) 335 return false; 336 337 // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop. 338 if (L->contains(QueryLoop)) 339 return true; 340 341 // This recurrence is variant w.r.t. QueryLoop if any of its operands 342 // are variant. 343 for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) 344 if (!(*I)->isLoopInvariant(QueryLoop)) 345 return false; 346 347 // Otherwise it's loop-invariant. 348 return true; 349 } 350 351 bool 352 SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const { 353 return DT->dominates(L->getHeader(), BB) && 354 SCEVNAryExpr::dominates(BB, DT); 355 } 356 357 bool 358 SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { 359 // This uses a "dominates" query instead of "properly dominates" query because 360 // the instruction which produces the addrec's value is a PHI, and a PHI 361 // effectively properly dominates its entire containing block. 362 return DT->dominates(L->getHeader(), BB) && 363 SCEVNAryExpr::properlyDominates(BB, DT); 364 } 365 366 void SCEVAddRecExpr::print(raw_ostream &OS) const { 367 OS << "{" << *Operands[0]; 368 for (unsigned i = 1, e = NumOperands; i != e; ++i) 369 OS << ",+," << *Operands[i]; 370 OS << "}<"; 371 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); 372 OS << ">"; 373 } 374 375 void SCEVUnknown::deleted() { 376 // Clear this SCEVUnknown from ValuesAtScopes. 377 SE->ValuesAtScopes.erase(this); 378 379 // Remove this SCEVUnknown from the uniquing map. 380 SE->UniqueSCEVs.RemoveNode(this); 381 382 // Release the value. 383 setValPtr(0); 384 } 385 386 void SCEVUnknown::allUsesReplacedWith(Value *New) { 387 // Clear this SCEVUnknown from ValuesAtScopes. 388 SE->ValuesAtScopes.erase(this); 389 390 // Remove this SCEVUnknown from the uniquing map. 391 SE->UniqueSCEVs.RemoveNode(this); 392 393 // Update this SCEVUnknown to point to the new value. This is needed 394 // because there may still be outstanding SCEVs which still point to 395 // this SCEVUnknown. 396 setValPtr(New); 397 } 398 399 bool SCEVUnknown::isLoopInvariant(const Loop *L) const { 400 // All non-instruction values are loop invariant. All instructions are loop 401 // invariant if they are not contained in the specified loop. 402 // Instructions are never considered invariant in the function body 403 // (null loop) because they are defined within the "loop". 
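  // For example, a function Argument or a GlobalValue is loop invariant in
  // every loop, while an instruction defined inside L's body is invariant
  // neither in L nor in any loop that contains L.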
404 if (Instruction *I = dyn_cast<Instruction>(getValue())) 405 return L && !L->contains(I); 406 return true; 407 } 408 409 bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const { 410 if (Instruction *I = dyn_cast<Instruction>(getValue())) 411 return DT->dominates(I->getParent(), BB); 412 return true; 413 } 414 415 bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const { 416 if (Instruction *I = dyn_cast<Instruction>(getValue())) 417 return DT->properlyDominates(I->getParent(), BB); 418 return true; 419 } 420 421 const Type *SCEVUnknown::getType() const { 422 return getValue()->getType(); 423 } 424 425 bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const { 426 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 427 if (VCE->getOpcode() == Instruction::PtrToInt) 428 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 429 if (CE->getOpcode() == Instruction::GetElementPtr && 430 CE->getOperand(0)->isNullValue() && 431 CE->getNumOperands() == 2) 432 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) 433 if (CI->isOne()) { 434 AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) 435 ->getElementType(); 436 return true; 437 } 438 439 return false; 440 } 441 442 bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const { 443 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 444 if (VCE->getOpcode() == Instruction::PtrToInt) 445 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 446 if (CE->getOpcode() == Instruction::GetElementPtr && 447 CE->getOperand(0)->isNullValue()) { 448 const Type *Ty = 449 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 450 if (const StructType *STy = dyn_cast<StructType>(Ty)) 451 if (!STy->isPacked() && 452 CE->getNumOperands() == 3 && 453 CE->getOperand(1)->isNullValue()) { 454 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) 455 if (CI->isOne() && 456 STy->getNumElements() == 2 && 457 STy->getElementType(0)->isIntegerTy(1)) { 458 AllocTy = STy->getElementType(1); 459 return true; 460 } 461 } 462 } 463 464 return false; 465 } 466 467 bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const { 468 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) 469 if (VCE->getOpcode() == Instruction::PtrToInt) 470 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) 471 if (CE->getOpcode() == Instruction::GetElementPtr && 472 CE->getNumOperands() == 3 && 473 CE->getOperand(0)->isNullValue() && 474 CE->getOperand(1)->isNullValue()) { 475 const Type *Ty = 476 cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); 477 // Ignore vector types here so that ScalarEvolutionExpander doesn't 478 // emit getelementptrs that index into vectors. 479 if (Ty->isStructTy() || Ty->isArrayTy()) { 480 CTy = Ty; 481 FieldNo = CE->getOperand(2); 482 return true; 483 } 484 } 485 486 return false; 487 } 488 489 void SCEVUnknown::print(raw_ostream &OS) const { 490 const Type *AllocTy; 491 if (isSizeOf(AllocTy)) { 492 OS << "sizeof(" << *AllocTy << ")"; 493 return; 494 } 495 if (isAlignOf(AllocTy)) { 496 OS << "alignof(" << *AllocTy << ")"; 497 return; 498 } 499 500 const Type *CTy; 501 Constant *FieldNo; 502 if (isOffsetOf(CTy, FieldNo)) { 503 OS << "offsetof(" << *CTy << ", "; 504 WriteAsOperand(OS, FieldNo, false); 505 OS << ")"; 506 return; 507 } 508 509 // Otherwise just print it normally. 
510 WriteAsOperand(OS, getValue(), false); 511 } 512 513 //===----------------------------------------------------------------------===// 514 // SCEV Utilities 515 //===----------------------------------------------------------------------===// 516 517 namespace { 518 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less 519 /// than the complexity of the RHS. This comparator is used to canonicalize 520 /// expressions. 521 class SCEVComplexityCompare { 522 const LoopInfo *const LI; 523 public: 524 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {} 525 526 // Return true or false if LHS is less than, or at least RHS, respectively. 527 bool operator()(const SCEV *LHS, const SCEV *RHS) const { 528 return compare(LHS, RHS) < 0; 529 } 530 531 // Return negative, zero, or positive, if LHS is less than, equal to, or 532 // greater than RHS, respectively. A three-way result allows recursive 533 // comparisons to be more efficient. 534 int compare(const SCEV *LHS, const SCEV *RHS) const { 535 // Fast-path: SCEVs are uniqued so we can do a quick equality check. 536 if (LHS == RHS) 537 return 0; 538 539 // Primarily, sort the SCEVs by their getSCEVType(). 540 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType(); 541 if (LType != RType) 542 return (int)LType - (int)RType; 543 544 // Aside from the getSCEVType() ordering, the particular ordering 545 // isn't very important except that it's beneficial to be consistent, 546 // so that (a + b) and (b + a) don't end up as different expressions. 547 switch (LType) { 548 case scUnknown: { 549 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); 550 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); 551 552 // Sort SCEVUnknown values with some loose heuristics. TODO: This is 553 // not as complete as it could be. 554 const Value *LV = LU->getValue(), *RV = RU->getValue(); 555 556 // Order pointer values after integer values. This helps SCEVExpander 557 // form GEPs. 558 bool LIsPointer = LV->getType()->isPointerTy(), 559 RIsPointer = RV->getType()->isPointerTy(); 560 if (LIsPointer != RIsPointer) 561 return (int)LIsPointer - (int)RIsPointer; 562 563 // Compare getValueID values. 564 unsigned LID = LV->getValueID(), 565 RID = RV->getValueID(); 566 if (LID != RID) 567 return (int)LID - (int)RID; 568 569 // Sort arguments by their position. 570 if (const Argument *LA = dyn_cast<Argument>(LV)) { 571 const Argument *RA = cast<Argument>(RV); 572 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo(); 573 return (int)LArgNo - (int)RArgNo; 574 } 575 576 // For instructions, compare their loop depth, and their operand 577 // count. This is pretty loose. 578 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) { 579 const Instruction *RInst = cast<Instruction>(RV); 580 581 // Compare loop depths. 582 const BasicBlock *LParent = LInst->getParent(), 583 *RParent = RInst->getParent(); 584 if (LParent != RParent) { 585 unsigned LDepth = LI->getLoopDepth(LParent), 586 RDepth = LI->getLoopDepth(RParent); 587 if (LDepth != RDepth) 588 return (int)LDepth - (int)RDepth; 589 } 590 591 // Compare the number of operands. 592 unsigned LNumOps = LInst->getNumOperands(), 593 RNumOps = RInst->getNumOperands(); 594 return (int)LNumOps - (int)RNumOps; 595 } 596 597 return 0; 598 } 599 600 case scConstant: { 601 const SCEVConstant *LC = cast<SCEVConstant>(LHS); 602 const SCEVConstant *RC = cast<SCEVConstant>(RHS); 603 604 // Compare constant values. 
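        // For example, an i8 constant always sorts before an i32 constant,
        // and among constants of equal width the values are compared as
        // unsigned integers, so i8 5 sorts before i8 -56 (which is 200 when
        // read as unsigned).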
605 const APInt &LA = LC->getValue()->getValue(); 606 const APInt &RA = RC->getValue()->getValue(); 607 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth(); 608 if (LBitWidth != RBitWidth) 609 return (int)LBitWidth - (int)RBitWidth; 610 return LA.ult(RA) ? -1 : 1; 611 } 612 613 case scAddRecExpr: { 614 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); 615 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); 616 617 // Compare addrec loop depths. 618 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); 619 if (LLoop != RLoop) { 620 unsigned LDepth = LLoop->getLoopDepth(), 621 RDepth = RLoop->getLoopDepth(); 622 if (LDepth != RDepth) 623 return (int)LDepth - (int)RDepth; 624 } 625 626 // Addrec complexity grows with operand count. 627 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands(); 628 if (LNumOps != RNumOps) 629 return (int)LNumOps - (int)RNumOps; 630 631 // Lexicographically compare. 632 for (unsigned i = 0; i != LNumOps; ++i) { 633 long X = compare(LA->getOperand(i), RA->getOperand(i)); 634 if (X != 0) 635 return X; 636 } 637 638 return 0; 639 } 640 641 case scAddExpr: 642 case scMulExpr: 643 case scSMaxExpr: 644 case scUMaxExpr: { 645 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); 646 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); 647 648 // Lexicographically compare n-ary expressions. 649 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); 650 for (unsigned i = 0; i != LNumOps; ++i) { 651 if (i >= RNumOps) 652 return 1; 653 long X = compare(LC->getOperand(i), RC->getOperand(i)); 654 if (X != 0) 655 return X; 656 } 657 return (int)LNumOps - (int)RNumOps; 658 } 659 660 case scUDivExpr: { 661 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); 662 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); 663 664 // Lexicographically compare udiv expressions. 665 long X = compare(LC->getLHS(), RC->getLHS()); 666 if (X != 0) 667 return X; 668 return compare(LC->getRHS(), RC->getRHS()); 669 } 670 671 case scTruncate: 672 case scZeroExtend: 673 case scSignExtend: { 674 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); 675 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); 676 677 // Compare cast expressions by operand. 678 return compare(LC->getOperand(), RC->getOperand()); 679 } 680 681 default: 682 break; 683 } 684 685 llvm_unreachable("Unknown SCEV kind!"); 686 return 0; 687 } 688 }; 689 } 690 691 /// GroupByComplexity - Given a list of SCEV objects, order them by their 692 /// complexity, and group objects of the same complexity together by value. 693 /// When this routine is finished, we know that any duplicates in the vector are 694 /// consecutive and that complexity is monotonically increasing. 695 /// 696 /// Note that we go take special precautions to ensure that we get deterministic 697 /// results from this routine. In other words, we don't want the results of 698 /// this to depend on where the addresses of various SCEV objects happened to 699 /// land in memory. 700 /// 701 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, 702 LoopInfo *LI) { 703 if (Ops.size() < 2) return; // Noop 704 if (Ops.size() == 2) { 705 // This is the common case, which also happens to be trivially simple. 706 // Special case it. 707 const SCEV *&LHS = Ops[0], *&RHS = Ops[1]; 708 if (SCEVComplexityCompare(LI)(RHS, LHS)) 709 std::swap(LHS, RHS); 710 return; 711 } 712 713 // Do the rough sort by complexity. 
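  // For instance (the operand names are illustrative): the list
  // {%a, 2, %b, %a} is sorted so that the constant 2 comes first; if the
  // comparator cannot order %a relative to %b, the unknowns may be left as
  // %a, %b, %a, and the grouping loop below then swaps the duplicate %a
  // forward so that the final order is {2, %a, %a, %b}.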
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i; // no need to rescan it.
        if (i == e-2) return; // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
// Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K). The result has width W.
/// Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must ensure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
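  //
  // As a small worked illustration (the numbers here are chosen purely for
  // exposition): take W = 8 and K = 4, so K! = 24 = 2^3 * 3, giving T = 3 and
  // an odd factor of 3. The multiplicative inverse of 3 modulo 2^8 is 171
  // (3 * 171 = 513 = 1 mod 256). For It = 5, the product 5 * 4 * 3 * 2 = 120
  // is formed at W + T = 11 bits, dividing by 2^T gives 120 / 8 = 15, and the
  // final multiply gives 15 * 171 = 2565 = 5 (mod 256), which is indeed
  // BC(5, 4) = 5.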
786 // 787 // It doesn't matter whether the subtraction step is done in the calculation 788 // width or the input iteration count's width; if the subtraction overflows, 789 // the result must be zero anyway. We prefer here to do it in the width of 790 // the induction variable because it helps a lot for certain cases; CodeGen 791 // isn't smart enough to ignore the overflow, which leads to much less 792 // efficient code if the width of the subtraction is wider than the native 793 // register width. 794 // 795 // (It's possible to not widen at all by pulling out factors of 2 before 796 // the multiplication; for example, K=2 can be calculated as 797 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires 798 // extra arithmetic, so it's not an obvious win, and it gets 799 // much more complicated for K > 3.) 800 801 // Protection from insane SCEVs; this bound is conservative, 802 // but it probably doesn't matter. 803 if (K > 1000) 804 return SE.getCouldNotCompute(); 805 806 unsigned W = SE.getTypeSizeInBits(ResultTy); 807 808 // Calculate K! / 2^T and T; we divide out the factors of two before 809 // multiplying for calculating K! / 2^T to avoid overflow. 810 // Other overflow doesn't matter because we only care about the bottom 811 // W bits of the result. 812 APInt OddFactorial(W, 1); 813 unsigned T = 1; 814 for (unsigned i = 3; i <= K; ++i) { 815 APInt Mult(W, i); 816 unsigned TwoFactors = Mult.countTrailingZeros(); 817 T += TwoFactors; 818 Mult = Mult.lshr(TwoFactors); 819 OddFactorial *= Mult; 820 } 821 822 // We need at least W + T bits for the multiplication step 823 unsigned CalculationBits = W + T; 824 825 // Calculate 2^T, at width T+W. 826 APInt DivFactor = APInt(CalculationBits, 1).shl(T); 827 828 // Calculate the multiplicative inverse of K! / 2^T; 829 // this multiplication factor will perform the exact division by 830 // K! / 2^T. 831 APInt Mod = APInt::getSignedMinValue(W+1); 832 APInt MultiplyFactor = OddFactorial.zext(W+1); 833 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod); 834 MultiplyFactor = MultiplyFactor.trunc(W); 835 836 // Calculate the product, at width T+W 837 const IntegerType *CalculationTy = IntegerType::get(SE.getContext(), 838 CalculationBits); 839 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy); 840 for (unsigned i = 1; i != K; ++i) { 841 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i)); 842 Dividend = SE.getMulExpr(Dividend, 843 SE.getTruncateOrZeroExtend(S, CalculationTy)); 844 } 845 846 // Divide by 2^T 847 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor)); 848 849 // Truncate the result, and divide by K! / 2^T. 850 851 return SE.getMulExpr(SE.getConstant(MultiplyFactor), 852 SE.getTruncateOrZeroExtend(DivResult, ResultTy)); 853 } 854 855 /// evaluateAtIteration - Return the value of this chain of recurrences at 856 /// the specified iteration number. We can evaluate this recurrence by 857 /// multiplying each element in the chain by the binomial coefficient 858 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as: 859 /// 860 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3) 861 /// 862 /// where BC(It, k) stands for binomial coefficient. 
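/// For example, the chain {0,+,1,+,1} (whose values at successive iterations
/// are 0, 1, 3, 6, 10, ...) evaluates at iteration It to
/// 0*BC(It, 0) + 1*BC(It, 1) + 1*BC(It, 2) = It + It*(It-1)/2.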
863 /// 864 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It, 865 ScalarEvolution &SE) const { 866 const SCEV *Result = getStart(); 867 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { 868 // The computation is correct in the face of overflow provided that the 869 // multiplication is performed _after_ the evaluation of the binomial 870 // coefficient. 871 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType()); 872 if (isa<SCEVCouldNotCompute>(Coeff)) 873 return Coeff; 874 875 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff)); 876 } 877 return Result; 878 } 879 880 //===----------------------------------------------------------------------===// 881 // SCEV Expression folder implementations 882 //===----------------------------------------------------------------------===// 883 884 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, 885 const Type *Ty) { 886 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) && 887 "This is not a truncating conversion!"); 888 assert(isSCEVable(Ty) && 889 "This is not a conversion to a SCEVable type!"); 890 Ty = getEffectiveSCEVType(Ty); 891 892 FoldingSetNodeID ID; 893 ID.AddInteger(scTruncate); 894 ID.AddPointer(Op); 895 ID.AddPointer(Ty); 896 void *IP = 0; 897 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 898 899 // Fold if the operand is constant. 900 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 901 return getConstant( 902 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), 903 getEffectiveSCEVType(Ty)))); 904 905 // trunc(trunc(x)) --> trunc(x) 906 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) 907 return getTruncateExpr(ST->getOperand(), Ty); 908 909 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing 910 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) 911 return getTruncateOrSignExtend(SS->getOperand(), Ty); 912 913 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing 914 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) 915 return getTruncateOrZeroExtend(SZ->getOperand(), Ty); 916 917 // If the input value is a chrec scev, truncate the chrec's operands. 918 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { 919 SmallVector<const SCEV *, 4> Operands; 920 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 921 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty)); 922 return getAddRecExpr(Operands, AddRec->getLoop()); 923 } 924 925 // As a special case, fold trunc(undef) to undef. We don't want to 926 // know too much about SCEVUnknowns, but this special case is handy 927 // and harmless. 928 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) 929 if (isa<UndefValue>(U->getValue())) 930 return getSCEV(UndefValue::get(Ty)); 931 932 // The cast wasn't folded; create an explicit cast node. We can reuse 933 // the existing insert position since if we get here, we won't have 934 // made any changes which would invalidate it. 
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
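          // That is, compute Start + Step*MaxBECount both in the original
          // width and with every operand zero-extended into a type twice as
          // wide; if zero-extending the narrow sum gives the same value as
          // the wide sum, the computation did not wrap, and the zero extend
          // can be pushed into the addrec's operands.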
1008 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step); 1009 const SCEV *Add = getAddExpr(Start, ZMul); 1010 const SCEV *OperandExtendedAdd = 1011 getAddExpr(getZeroExtendExpr(Start, WideTy), 1012 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 1013 getZeroExtendExpr(Step, WideTy))); 1014 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) 1015 // Return the expression with the addrec on the outside. 1016 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 1017 getZeroExtendExpr(Step, Ty), 1018 L); 1019 1020 // Similar to above, only this time treat the step value as signed. 1021 // This covers loops that count down. 1022 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step); 1023 Add = getAddExpr(Start, SMul); 1024 OperandExtendedAdd = 1025 getAddExpr(getZeroExtendExpr(Start, WideTy), 1026 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 1027 getSignExtendExpr(Step, WideTy))); 1028 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) 1029 // Return the expression with the addrec on the outside. 1030 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 1031 getSignExtendExpr(Step, Ty), 1032 L); 1033 } 1034 1035 // If the backedge is guarded by a comparison with the pre-inc value 1036 // the addrec is safe. Also, if the entry is guarded by a comparison 1037 // with the start value and the backedge is guarded by a comparison 1038 // with the post-inc value, the addrec is safe. 1039 if (isKnownPositive(Step)) { 1040 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) - 1041 getUnsignedRange(Step).getUnsignedMax()); 1042 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) || 1043 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) && 1044 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, 1045 AR->getPostIncExpr(*this), N))) 1046 // Return the expression with the addrec on the outside. 1047 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 1048 getZeroExtendExpr(Step, Ty), 1049 L); 1050 } else if (isKnownNegative(Step)) { 1051 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) - 1052 getSignedRange(Step).getSignedMin()); 1053 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) || 1054 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) && 1055 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, 1056 AR->getPostIncExpr(*this), N))) 1057 // Return the expression with the addrec on the outside. 1058 return getAddRecExpr(getZeroExtendExpr(Start, Ty), 1059 getSignExtendExpr(Step, Ty), 1060 L); 1061 } 1062 } 1063 } 1064 1065 // The cast wasn't folded; create an explicit cast node. 1066 // Recompute the insert position, as it may have been invalidated. 1067 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1068 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), 1069 Op, Ty); 1070 UniqueSCEVs.InsertNode(S, IP); 1071 return S; 1072 } 1073 1074 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op, 1075 const Type *Ty) { 1076 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1077 "This is not an extending conversion!"); 1078 assert(isSCEVable(Ty) && 1079 "This is not a conversion to a SCEVable type!"); 1080 Ty = getEffectiveSCEVType(Ty); 1081 1082 // Fold if the operand is constant. 
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
                                              getEffectiveSCEVType(Ty))));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants). This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
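          // That is, compare sext(Start + Step*MaxBECount) against the same
          // sum recomputed in a type twice as wide with Start sign-extended
          // and Step and the backedge-taken count zero-extended; if the two
          // agree, the addrec can be rewritten with a sign-extended start
          // and a zero-extended step.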
1155 const SCEV *UMul = getMulExpr(CastedMaxBECount, Step); 1156 Add = getAddExpr(Start, UMul); 1157 OperandExtendedAdd = 1158 getAddExpr(getSignExtendExpr(Start, WideTy), 1159 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy), 1160 getZeroExtendExpr(Step, WideTy))); 1161 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) 1162 // Return the expression with the addrec on the outside. 1163 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1164 getZeroExtendExpr(Step, Ty), 1165 L); 1166 } 1167 1168 // If the backedge is guarded by a comparison with the pre-inc value 1169 // the addrec is safe. Also, if the entry is guarded by a comparison 1170 // with the start value and the backedge is guarded by a comparison 1171 // with the post-inc value, the addrec is safe. 1172 if (isKnownPositive(Step)) { 1173 const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) - 1174 getSignedRange(Step).getSignedMax()); 1175 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) || 1176 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) && 1177 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, 1178 AR->getPostIncExpr(*this), N))) 1179 // Return the expression with the addrec on the outside. 1180 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1181 getSignExtendExpr(Step, Ty), 1182 L); 1183 } else if (isKnownNegative(Step)) { 1184 const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) - 1185 getSignedRange(Step).getSignedMin()); 1186 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) || 1187 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) && 1188 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, 1189 AR->getPostIncExpr(*this), N))) 1190 // Return the expression with the addrec on the outside. 1191 return getAddRecExpr(getSignExtendExpr(Start, Ty), 1192 getSignExtendExpr(Step, Ty), 1193 L); 1194 } 1195 } 1196 } 1197 1198 // The cast wasn't folded; create an explicit cast node. 1199 // Recompute the insert position, as it may have been invalidated. 1200 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1201 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), 1202 Op, Ty); 1203 UniqueSCEVs.InsertNode(S, IP); 1204 return S; 1205 } 1206 1207 /// getAnyExtendExpr - Return a SCEV for the given operand extended with 1208 /// unspecified bits out to the given type. 1209 /// 1210 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op, 1211 const Type *Ty) { 1212 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) && 1213 "This is not an extending conversion!"); 1214 assert(isSCEVable(Ty) && 1215 "This is not a conversion to a SCEVable type!"); 1216 Ty = getEffectiveSCEVType(Ty); 1217 1218 // Sign-extend negative constants. 1219 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) 1220 if (SC->getValue()->getValue().isNegative()) 1221 return getSignExtendExpr(Op, Ty); 1222 1223 // Peel off a truncate cast. 1224 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { 1225 const SCEV *NewOp = T->getOperand(); 1226 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) 1227 return getAnyExtendExpr(NewOp, Ty); 1228 return getTruncateOrNoop(NewOp, Ty); 1229 } 1230 1231 // Next try a zext cast. If the cast is folded, use it. 1232 const SCEV *ZExt = getZeroExtendExpr(Op, Ty); 1233 if (!isa<SCEVZeroExtendExpr>(ZExt)) 1234 return ZExt; 1235 1236 // Next try a sext cast. If the cast is folded, use it. 
1237 const SCEV *SExt = getSignExtendExpr(Op, Ty); 1238 if (!isa<SCEVSignExtendExpr>(SExt)) 1239 return SExt; 1240 1241 // Force the cast to be folded into the operands of an addrec. 1242 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { 1243 SmallVector<const SCEV *, 4> Ops; 1244 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end(); 1245 I != E; ++I) 1246 Ops.push_back(getAnyExtendExpr(*I, Ty)); 1247 return getAddRecExpr(Ops, AR->getLoop()); 1248 } 1249 1250 // As a special case, fold anyext(undef) to undef. We don't want to 1251 // know too much about SCEVUnknowns, but this special case is handy 1252 // and harmless. 1253 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op)) 1254 if (isa<UndefValue>(U->getValue())) 1255 return getSCEV(UndefValue::get(Ty)); 1256 1257 // If the expression is obviously signed, use the sext cast value. 1258 if (isa<SCEVSMaxExpr>(Op)) 1259 return SExt; 1260 1261 // Absent any other information, use the zext cast value. 1262 return ZExt; 1263 } 1264 1265 /// CollectAddOperandsWithScales - Process the given Ops list, which is 1266 /// a list of operands to be added under the given scale, update the given 1267 /// map. This is a helper function for getAddRecExpr. As an example of 1268 /// what it does, given a sequence of operands that would form an add 1269 /// expression like this: 1270 /// 1271 /// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r) 1272 /// 1273 /// where A and B are constants, update the map with these values: 1274 /// 1275 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0) 1276 /// 1277 /// and add 13 + A*B*29 to AccumulatedConstant. 1278 /// This will allow getAddRecExpr to produce this: 1279 /// 1280 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B) 1281 /// 1282 /// This form often exposes folding opportunities that are hidden in 1283 /// the original operand list. 1284 /// 1285 /// Return true iff it appears that any interesting folding opportunities 1286 /// may be exposed. This helps getAddRecExpr short-circuit extra work in 1287 /// the common case where no interesting opportunities are present, and 1288 /// is also used as a check to avoid infinite recursion. 1289 /// 1290 static bool 1291 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M, 1292 SmallVector<const SCEV *, 8> &NewOps, 1293 APInt &AccumulatedConstant, 1294 const SCEV *const *Ops, size_t NumOperands, 1295 const APInt &Scale, 1296 ScalarEvolution &SE) { 1297 bool Interesting = false; 1298 1299 // Iterate over the add operands. They are sorted, with constants first. 1300 unsigned i = 0; 1301 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 1302 ++i; 1303 // Pull a buried constant out to the outside. 1304 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) 1305 Interesting = true; 1306 AccumulatedConstant += Scale * C->getValue()->getValue(); 1307 } 1308 1309 // Next comes everything else. We're especially interested in multiplies 1310 // here, but they're in the middle, so just visit the rest with one loop. 1311 for (; i != NumOperands; ++i) { 1312 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); 1313 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { 1314 APInt NewScale = 1315 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue(); 1316 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { 1317 // A multiplication of a constant with another add; recurse. 
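        // In the example from this function's header comment, this is the
        // point where the (B * q + m + 29) term inside the A-scaled add is
        // visited: its operands are pushed down with the accumulated scale
        // A*B rather than being left opaque.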
1318 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); 1319 Interesting |= 1320 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 1321 Add->op_begin(), Add->getNumOperands(), 1322 NewScale, SE); 1323 } else { 1324 // A multiplication of a constant with some other value. Update 1325 // the map. 1326 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end()); 1327 const SCEV *Key = SE.getMulExpr(MulOps); 1328 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 1329 M.insert(std::make_pair(Key, NewScale)); 1330 if (Pair.second) { 1331 NewOps.push_back(Pair.first->first); 1332 } else { 1333 Pair.first->second += NewScale; 1334 // The map already had an entry for this value, which may indicate 1335 // a folding opportunity. 1336 Interesting = true; 1337 } 1338 } 1339 } else { 1340 // An ordinary operand. Update the map. 1341 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair = 1342 M.insert(std::make_pair(Ops[i], Scale)); 1343 if (Pair.second) { 1344 NewOps.push_back(Pair.first->first); 1345 } else { 1346 Pair.first->second += Scale; 1347 // The map already had an entry for this value, which may indicate 1348 // a folding opportunity. 1349 Interesting = true; 1350 } 1351 } 1352 } 1353 1354 return Interesting; 1355 } 1356 1357 namespace { 1358 struct APIntCompare { 1359 bool operator()(const APInt &LHS, const APInt &RHS) const { 1360 return LHS.ult(RHS); 1361 } 1362 }; 1363 } 1364 1365 /// getAddExpr - Get a canonical add expression, or something simpler if 1366 /// possible. 1367 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, 1368 bool HasNUW, bool HasNSW) { 1369 assert(!Ops.empty() && "Cannot get empty add!"); 1370 if (Ops.size() == 1) return Ops[0]; 1371 #ifndef NDEBUG 1372 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 1373 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1374 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 1375 "SCEVAddExpr operand types don't match!"); 1376 #endif 1377 1378 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1379 if (!HasNUW && HasNSW) { 1380 bool All = true; 1381 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(), 1382 E = Ops.end(); I != E; ++I) 1383 if (!isKnownNonNegative(*I)) { 1384 All = false; 1385 break; 1386 } 1387 if (All) HasNUW = true; 1388 } 1389 1390 // Sort by complexity, this groups all similar expression types together. 1391 GroupByComplexity(Ops, LI); 1392 1393 // If there are any constants, fold them together. 1394 unsigned Idx = 0; 1395 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1396 ++Idx; 1397 assert(Idx < Ops.size()); 1398 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1399 // We found two constants, fold them together! 1400 Ops[0] = getConstant(LHSC->getValue()->getValue() + 1401 RHSC->getValue()->getValue()); 1402 if (Ops.size() == 2) return Ops[0]; 1403 Ops.erase(Ops.begin()+1); // Erase the folded element 1404 LHSC = cast<SCEVConstant>(Ops[0]); 1405 } 1406 1407 // If we are left with a constant zero being added, strip it off. 1408 if (LHSC->getValue()->isZero()) { 1409 Ops.erase(Ops.begin()); 1410 --Idx; 1411 } 1412 1413 if (Ops.size() == 1) return Ops[0]; 1414 } 1415 1416 // Okay, check to see if the same value occurs in the operand list more than 1417 // once. If so, merge them together into an multiply expression. Since we 1418 // sorted the list, these values are required to be adjacent. 
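  // For example (with illustrative operands), (%x + %y + %y + %y) becomes
  // (%x + 3 * %y): the adjacent copies of %y are counted and replaced by a
  // single multiply by that count.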
1419 const Type *Ty = Ops[0]->getType(); 1420 bool FoundMatch = false; 1421 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i) 1422 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2 1423 // Scan ahead to count how many equal operands there are. 1424 unsigned Count = 2; 1425 while (i+Count != e && Ops[i+Count] == Ops[i]) 1426 ++Count; 1427 // Merge the values into a multiply. 1428 const SCEV *Scale = getConstant(Ty, Count); 1429 const SCEV *Mul = getMulExpr(Scale, Ops[i]); 1430 if (Ops.size() == Count) 1431 return Mul; 1432 Ops[i] = Mul; 1433 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count); 1434 --i; e -= Count - 1; 1435 FoundMatch = true; 1436 } 1437 if (FoundMatch) 1438 return getAddExpr(Ops, HasNUW, HasNSW); 1439 1440 // Check for truncates. If all the operands are truncated from the same 1441 // type, see if factoring out the truncate would permit the result to be 1442 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) 1443 // if the contents of the resulting outer trunc fold to something simple. 1444 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { 1445 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); 1446 const Type *DstType = Trunc->getType(); 1447 const Type *SrcType = Trunc->getOperand()->getType(); 1448 SmallVector<const SCEV *, 8> LargeOps; 1449 bool Ok = true; 1450 // Check all the operands to see if they can be represented in the 1451 // source type of the truncate. 1452 for (unsigned i = 0, e = Ops.size(); i != e; ++i) { 1453 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { 1454 if (T->getOperand()->getType() != SrcType) { 1455 Ok = false; 1456 break; 1457 } 1458 LargeOps.push_back(T->getOperand()); 1459 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { 1460 LargeOps.push_back(getAnyExtendExpr(C, SrcType)); 1461 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { 1462 SmallVector<const SCEV *, 8> LargeMulOps; 1463 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { 1464 if (const SCEVTruncateExpr *T = 1465 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { 1466 if (T->getOperand()->getType() != SrcType) { 1467 Ok = false; 1468 break; 1469 } 1470 LargeMulOps.push_back(T->getOperand()); 1471 } else if (const SCEVConstant *C = 1472 dyn_cast<SCEVConstant>(M->getOperand(j))) { 1473 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType)); 1474 } else { 1475 Ok = false; 1476 break; 1477 } 1478 } 1479 if (Ok) 1480 LargeOps.push_back(getMulExpr(LargeMulOps)); 1481 } else { 1482 Ok = false; 1483 break; 1484 } 1485 } 1486 if (Ok) { 1487 // Evaluate the expression in the larger type. 1488 const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW); 1489 // If it folds to something simple, use it. Otherwise, don't. 1490 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold)) 1491 return getTruncateExpr(Fold, DstType); 1492 } 1493 } 1494 1495 // Skip past any other cast SCEVs. 1496 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr) 1497 ++Idx; 1498 1499 // If there are add operands they would be next. 1500 if (Idx < Ops.size()) { 1501 bool DeletedAdd = false; 1502 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { 1503 // If we have an add, expand the add operands onto the end of the operands 1504 // list. 1505 Ops.erase(Ops.begin()+Idx); 1506 Ops.append(Add->op_begin(), Add->op_end()); 1507 DeletedAdd = true; 1508 } 1509 1510 // If we deleted at least one add, we added operands to the end of the list, 1511 // and they are not necessarily sorted. 
Recurse to resort and resimplify 1512 // any operands we just acquired. 1513 if (DeletedAdd) 1514 return getAddExpr(Ops); 1515 } 1516 1517 // Skip over the add expression until we get to a multiply. 1518 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 1519 ++Idx; 1520 1521 // Check to see if there are any folding opportunities present with 1522 // operands multiplied by constant values. 1523 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) { 1524 uint64_t BitWidth = getTypeSizeInBits(Ty); 1525 DenseMap<const SCEV *, APInt> M; 1526 SmallVector<const SCEV *, 8> NewOps; 1527 APInt AccumulatedConstant(BitWidth, 0); 1528 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, 1529 Ops.data(), Ops.size(), 1530 APInt(BitWidth, 1), *this)) { 1531 // Some interesting folding opportunity is present, so its worthwhile to 1532 // re-generate the operands list. Group the operands by constant scale, 1533 // to avoid multiplying by the same constant scale multiple times. 1534 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists; 1535 for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(), 1536 E = NewOps.end(); I != E; ++I) 1537 MulOpLists[M.find(*I)->second].push_back(*I); 1538 // Re-generate the operands list. 1539 Ops.clear(); 1540 if (AccumulatedConstant != 0) 1541 Ops.push_back(getConstant(AccumulatedConstant)); 1542 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator 1543 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I) 1544 if (I->first != 0) 1545 Ops.push_back(getMulExpr(getConstant(I->first), 1546 getAddExpr(I->second))); 1547 if (Ops.empty()) 1548 return getConstant(Ty, 0); 1549 if (Ops.size() == 1) 1550 return Ops[0]; 1551 return getAddExpr(Ops); 1552 } 1553 } 1554 1555 // If we are adding something to a multiply expression, make sure the 1556 // something is not already an operand of the multiply. If so, merge it into 1557 // the multiply. 1558 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 1559 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 1560 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 1561 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 1562 if (isa<SCEVConstant>(MulOpSCEV)) 1563 continue; 1564 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 1565 if (MulOpSCEV == Ops[AddOp]) { 1566 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 1567 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 1568 if (Mul->getNumOperands() != 2) { 1569 // If the multiply has more than two operands, we must get the 1570 // Y*Z term. 1571 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1572 Mul->op_begin()+MulOp); 1573 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1574 InnerMul = getMulExpr(MulOps); 1575 } 1576 const SCEV *One = getConstant(Ty, 1); 1577 const SCEV *AddOne = getAddExpr(One, InnerMul); 1578 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV); 1579 if (Ops.size() == 2) return OuterMul; 1580 if (AddOp < Idx) { 1581 Ops.erase(Ops.begin()+AddOp); 1582 Ops.erase(Ops.begin()+Idx-1); 1583 } else { 1584 Ops.erase(Ops.begin()+Idx); 1585 Ops.erase(Ops.begin()+AddOp-1); 1586 } 1587 Ops.push_back(OuterMul); 1588 return getAddExpr(Ops); 1589 } 1590 1591 // Check this multiply against other multiplies being added together. 
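// For example (illustrative): W + A*B + A*C shares the non-constant factor
// A, so the loop below rewrites the sum as W + A*(B + C), turning two
// multiplies in the add into one.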
1592 for (unsigned OtherMulIdx = Idx+1; 1593 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 1594 ++OtherMulIdx) { 1595 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 1596 // If MulOp occurs in OtherMul, we can fold the two multiplies 1597 // together. 1598 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 1599 OMulOp != e; ++OMulOp) 1600 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 1601 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 1602 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 1603 if (Mul->getNumOperands() != 2) { 1604 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1605 Mul->op_begin()+MulOp); 1606 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end()); 1607 InnerMul1 = getMulExpr(MulOps); 1608 } 1609 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 1610 if (OtherMul->getNumOperands() != 2) { 1611 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 1612 OtherMul->op_begin()+OMulOp); 1613 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end()); 1614 InnerMul2 = getMulExpr(MulOps); 1615 } 1616 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 1617 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 1618 if (Ops.size() == 2) return OuterMul; 1619 Ops.erase(Ops.begin()+Idx); 1620 Ops.erase(Ops.begin()+OtherMulIdx-1); 1621 Ops.push_back(OuterMul); 1622 return getAddExpr(Ops); 1623 } 1624 } 1625 } 1626 } 1627 1628 // If there are any add recurrences in the operands list, see if any other 1629 // added values are loop invariant. If so, we can fold them into the 1630 // recurrence. 1631 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1632 ++Idx; 1633 1634 // Scan over all recurrences, trying to fold loop invariants into them. 1635 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1636 // Scan all of the other operands to this add and add them to the vector if 1637 // they are loop invariant w.r.t. the recurrence. 1638 SmallVector<const SCEV *, 8> LIOps; 1639 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1640 const Loop *AddRecLoop = AddRec->getLoop(); 1641 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1642 if (Ops[i]->isLoopInvariant(AddRecLoop)) { 1643 LIOps.push_back(Ops[i]); 1644 Ops.erase(Ops.begin()+i); 1645 --i; --e; 1646 } 1647 1648 // If we found some loop invariants, fold them into the recurrence. 1649 if (!LIOps.empty()) { 1650 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 1651 LIOps.push_back(AddRec->getStart()); 1652 1653 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1654 AddRec->op_end()); 1655 AddRecOps[0] = getAddExpr(LIOps); 1656 1657 // Build the new addrec. Propagate the NUW and NSW flags if both the 1658 // outer add and the inner addrec are guaranteed to have no overflow. 1659 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, 1660 HasNUW && AddRec->hasNoUnsignedWrap(), 1661 HasNSW && AddRec->hasNoSignedWrap()); 1662 1663 // If all of the other operands were loop invariant, we are done. 1664 if (Ops.size() == 1) return NewRec; 1665 1666 // Otherwise, add the folded AddRec by the non-liv parts. 1667 for (unsigned i = 0;; ++i) 1668 if (Ops[i] == AddRec) { 1669 Ops[i] = NewRec; 1670 break; 1671 } 1672 return getAddExpr(Ops); 1673 } 1674 1675 // Okay, if there weren't any loop invariants to be folded, check to see if 1676 // there are multiple AddRec's with the same loop induction variable being 1677 // added together. If so, we can fold them. 
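// For example (illustrative): {1,+,2}<L> + {3,+,4}<L> folds below to
// {4,+,6}<L>, since add recurrences over the same loop add element-wise
// (a longer recurrence simply keeps its extra high-order operands).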
1678 for (unsigned OtherIdx = Idx+1; 1679 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1680 ++OtherIdx) 1681 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 1682 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L> 1683 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1684 AddRec->op_end()); 1685 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1686 ++OtherIdx) 1687 if (const SCEVAddRecExpr *OtherAddRec = 1688 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) 1689 if (OtherAddRec->getLoop() == AddRecLoop) { 1690 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); 1691 i != e; ++i) { 1692 if (i >= AddRecOps.size()) { 1693 AddRecOps.append(OtherAddRec->op_begin()+i, 1694 OtherAddRec->op_end()); 1695 break; 1696 } 1697 AddRecOps[i] = getAddExpr(AddRecOps[i], 1698 OtherAddRec->getOperand(i)); 1699 } 1700 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 1701 } 1702 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop); 1703 return getAddExpr(Ops); 1704 } 1705 1706 // Otherwise couldn't fold anything into this recurrence. Move onto the 1707 // next one. 1708 } 1709 1710 // Okay, it looks like we really DO need an add expr. Check to see if we 1711 // already have one, otherwise create a new one. 1712 FoldingSetNodeID ID; 1713 ID.AddInteger(scAddExpr); 1714 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1715 ID.AddPointer(Ops[i]); 1716 void *IP = 0; 1717 SCEVAddExpr *S = 1718 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1719 if (!S) { 1720 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 1721 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 1722 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator), 1723 O, Ops.size()); 1724 UniqueSCEVs.InsertNode(S, IP); 1725 } 1726 if (HasNUW) S->setHasNoUnsignedWrap(true); 1727 if (HasNSW) S->setHasNoSignedWrap(true); 1728 return S; 1729 } 1730 1731 /// getMulExpr - Get a canonical multiply expression, or something simpler if 1732 /// possible. 1733 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 1734 bool HasNUW, bool HasNSW) { 1735 assert(!Ops.empty() && "Cannot get empty mul!"); 1736 if (Ops.size() == 1) return Ops[0]; 1737 #ifndef NDEBUG 1738 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 1739 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1740 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 1741 "SCEVMulExpr operand types don't match!"); 1742 #endif 1743 1744 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1745 if (!HasNUW && HasNSW) { 1746 bool All = true; 1747 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(), 1748 E = Ops.end(); I != E; ++I) 1749 if (!isKnownNonNegative(*I)) { 1750 All = false; 1751 break; 1752 } 1753 if (All) HasNUW = true; 1754 } 1755 1756 // Sort by complexity, this groups all similar expression types together. 1757 GroupByComplexity(Ops, LI); 1758 1759 // If there are any constants, fold them together. 
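// For example (illustrative): 3 * X * 5 folds below to 15 * X, and
// 2 * (4 + Y) is distributed to 8 + 2*Y because the two-operand add
// begins with a constant.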
1760 unsigned Idx = 0; 1761 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1762 1763 // C1*(C2+V) -> C1*C2 + C1*V 1764 if (Ops.size() == 2) 1765 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 1766 if (Add->getNumOperands() == 2 && 1767 isa<SCEVConstant>(Add->getOperand(0))) 1768 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 1769 getMulExpr(LHSC, Add->getOperand(1))); 1770 1771 ++Idx; 1772 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1773 // We found two constants, fold them together! 1774 ConstantInt *Fold = ConstantInt::get(getContext(), 1775 LHSC->getValue()->getValue() * 1776 RHSC->getValue()->getValue()); 1777 Ops[0] = getConstant(Fold); 1778 Ops.erase(Ops.begin()+1); // Erase the folded element 1779 if (Ops.size() == 1) return Ops[0]; 1780 LHSC = cast<SCEVConstant>(Ops[0]); 1781 } 1782 1783 // If we are left with a constant one being multiplied, strip it off. 1784 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 1785 Ops.erase(Ops.begin()); 1786 --Idx; 1787 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 1788 // If we have a multiply of zero, it will always be zero. 1789 return Ops[0]; 1790 } else if (Ops[0]->isAllOnesValue()) { 1791 // If we have a mul by -1 of an add, try distributing the -1 among the 1792 // add operands. 1793 if (Ops.size() == 2) 1794 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 1795 SmallVector<const SCEV *, 4> NewOps; 1796 bool AnyFolded = false; 1797 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 1798 I != E; ++I) { 1799 const SCEV *Mul = getMulExpr(Ops[0], *I); 1800 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 1801 NewOps.push_back(Mul); 1802 } 1803 if (AnyFolded) 1804 return getAddExpr(NewOps); 1805 } 1806 } 1807 1808 if (Ops.size() == 1) 1809 return Ops[0]; 1810 } 1811 1812 // Skip over the add expression until we get to a multiply. 1813 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 1814 ++Idx; 1815 1816 // If there are mul operands inline them all into this expression. 1817 if (Idx < Ops.size()) { 1818 bool DeletedMul = false; 1819 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 1820 // If we have an mul, expand the mul operands onto the end of the operands 1821 // list. 1822 Ops.erase(Ops.begin()+Idx); 1823 Ops.append(Mul->op_begin(), Mul->op_end()); 1824 DeletedMul = true; 1825 } 1826 1827 // If we deleted at least one mul, we added operands to the end of the list, 1828 // and they are not necessarily sorted. Recurse to resort and resimplify 1829 // any operands we just acquired. 1830 if (DeletedMul) 1831 return getMulExpr(Ops); 1832 } 1833 1834 // If there are any add recurrences in the operands list, see if any other 1835 // added values are loop invariant. If so, we can fold them into the 1836 // recurrence. 1837 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1838 ++Idx; 1839 1840 // Scan over all recurrences, trying to fold loop invariants into them. 1841 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1842 // Scan all of the other operands to this mul and add them to the vector if 1843 // they are loop invariant w.r.t. the recurrence. 
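// For example (illustrative): if N is invariant in loop L, then
// N * {2,+,3}<L> folds below to {2*N,+,3*N}<L>, scaling every operand of
// the recurrence by the loop-invariant factor.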
1844 SmallVector<const SCEV *, 8> LIOps; 1845 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1846 const Loop *AddRecLoop = AddRec->getLoop(); 1847 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1848 if (Ops[i]->isLoopInvariant(AddRecLoop)) { 1849 LIOps.push_back(Ops[i]); 1850 Ops.erase(Ops.begin()+i); 1851 --i; --e; 1852 } 1853 1854 // If we found some loop invariants, fold them into the recurrence. 1855 if (!LIOps.empty()) { 1856 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 1857 SmallVector<const SCEV *, 4> NewOps; 1858 NewOps.reserve(AddRec->getNumOperands()); 1859 const SCEV *Scale = getMulExpr(LIOps); 1860 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 1861 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); 1862 1863 // Build the new addrec. Propagate the NUW and NSW flags if both the 1864 // outer mul and the inner addrec are guaranteed to have no overflow. 1865 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, 1866 HasNUW && AddRec->hasNoUnsignedWrap(), 1867 HasNSW && AddRec->hasNoSignedWrap()); 1868 1869 // If all of the other operands were loop invariant, we are done. 1870 if (Ops.size() == 1) return NewRec; 1871 1872 // Otherwise, multiply the folded AddRec by the non-liv parts. 1873 for (unsigned i = 0;; ++i) 1874 if (Ops[i] == AddRec) { 1875 Ops[i] = NewRec; 1876 break; 1877 } 1878 return getMulExpr(Ops); 1879 } 1880 1881 // Okay, if there weren't any loop invariants to be folded, check to see if 1882 // there are multiple AddRec's with the same loop induction variable being 1883 // multiplied together. If so, we can fold them. 1884 for (unsigned OtherIdx = Idx+1; 1885 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1886 ++OtherIdx) 1887 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) { 1888 // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L> --> 1889 // {A*C,+,F*D + G*B + B*D}<L> 1890 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); 1891 ++OtherIdx) 1892 if (const SCEVAddRecExpr *OtherAddRec = 1893 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) 1894 if (OtherAddRec->getLoop() == AddRecLoop) { 1895 const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; 1896 const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart()); 1897 const SCEV *B = F->getStepRecurrence(*this); 1898 const SCEV *D = G->getStepRecurrence(*this); 1899 const SCEV *NewStep = getAddExpr(getMulExpr(F, D), 1900 getMulExpr(G, B), 1901 getMulExpr(B, D)); 1902 const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep, 1903 F->getLoop()); 1904 if (Ops.size() == 2) return NewAddRec; 1905 Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec); 1906 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; 1907 } 1908 return getMulExpr(Ops); 1909 } 1910 1911 // Otherwise couldn't fold anything into this recurrence. Move onto the 1912 // next one. 1913 } 1914 1915 // Okay, it looks like we really DO need an mul expr. Check to see if we 1916 // already have one, otherwise create a new one. 
1917 FoldingSetNodeID ID; 1918 ID.AddInteger(scMulExpr); 1919 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1920 ID.AddPointer(Ops[i]); 1921 void *IP = 0; 1922 SCEVMulExpr *S = 1923 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1924 if (!S) { 1925 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 1926 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 1927 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator), 1928 O, Ops.size()); 1929 UniqueSCEVs.InsertNode(S, IP); 1930 } 1931 if (HasNUW) S->setHasNoUnsignedWrap(true); 1932 if (HasNSW) S->setHasNoSignedWrap(true); 1933 return S; 1934 } 1935 1936 /// getUDivExpr - Get a canonical unsigned division expression, or something 1937 /// simpler if possible. 1938 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 1939 const SCEV *RHS) { 1940 assert(getEffectiveSCEVType(LHS->getType()) == 1941 getEffectiveSCEVType(RHS->getType()) && 1942 "SCEVUDivExpr operand types don't match!"); 1943 1944 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 1945 if (RHSC->getValue()->equalsInt(1)) 1946 return LHS; // X udiv 1 --> x 1947 // If the denominator is zero, the result of the udiv is undefined. Don't 1948 // try to analyze it, because the resolution chosen here may differ from 1949 // the resolution chosen in other parts of the compiler. 1950 if (!RHSC->getValue()->isZero()) { 1951 // Determine if the division can be folded into the operands of 1952 // its operands. 1953 // TODO: Generalize this to non-constants by using known-bits information. 1954 const Type *Ty = LHS->getType(); 1955 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); 1956 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1; 1957 // For non-power-of-two values, effectively round the value up to the 1958 // nearest power of two. 1959 if (!RHSC->getValue()->getValue().isPowerOf2()) 1960 ++MaxShiftAmt; 1961 const IntegerType *ExtTy = 1962 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 1963 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 1964 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 1965 if (const SCEVConstant *Step = 1966 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) 1967 if (!Step->getValue()->getValue() 1968 .urem(RHSC->getValue()->getValue()) && 1969 getZeroExtendExpr(AR, ExtTy) == 1970 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 1971 getZeroExtendExpr(Step, ExtTy), 1972 AR->getLoop())) { 1973 SmallVector<const SCEV *, 4> Operands; 1974 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) 1975 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); 1976 return getAddRecExpr(Operands, AR->getLoop()); 1977 } 1978 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 1979 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 1980 SmallVector<const SCEV *, 4> Operands; 1981 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) 1982 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); 1983 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 1984 // Find an operand that's safely divisible. 
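// For example (illustrative, assuming the no-overflow check above
// succeeded): (8 * X) udiv 4 finds that the constant operand 8 divides
// evenly by 4 (2 * 4 == 8), so the expression folds to 2 * X.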
1985 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 1986 const SCEV *Op = M->getOperand(i); 1987 const SCEV *Div = getUDivExpr(Op, RHSC); 1988 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 1989 Operands = SmallVector<const SCEV *, 4>(M->op_begin(), 1990 M->op_end()); 1991 Operands[i] = Div; 1992 return getMulExpr(Operands); 1993 } 1994 } 1995 } 1996 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 1997 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) { 1998 SmallVector<const SCEV *, 4> Operands; 1999 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) 2000 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); 2001 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 2002 Operands.clear(); 2003 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 2004 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 2005 if (isa<SCEVUDivExpr>(Op) || 2006 getMulExpr(Op, RHS) != A->getOperand(i)) 2007 break; 2008 Operands.push_back(Op); 2009 } 2010 if (Operands.size() == A->getNumOperands()) 2011 return getAddExpr(Operands); 2012 } 2013 } 2014 2015 // Fold if both operands are constant. 2016 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 2017 Constant *LHSCV = LHSC->getValue(); 2018 Constant *RHSCV = RHSC->getValue(); 2019 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 2020 RHSCV))); 2021 } 2022 } 2023 } 2024 2025 FoldingSetNodeID ID; 2026 ID.AddInteger(scUDivExpr); 2027 ID.AddPointer(LHS); 2028 ID.AddPointer(RHS); 2029 void *IP = 0; 2030 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2031 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator), 2032 LHS, RHS); 2033 UniqueSCEVs.InsertNode(S, IP); 2034 return S; 2035 } 2036 2037 2038 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 2039 /// Simplify the expression as much as possible. 2040 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, 2041 const SCEV *Step, const Loop *L, 2042 bool HasNUW, bool HasNSW) { 2043 SmallVector<const SCEV *, 4> Operands; 2044 Operands.push_back(Start); 2045 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 2046 if (StepChrec->getLoop() == L) { 2047 Operands.append(StepChrec->op_begin(), StepChrec->op_end()); 2048 return getAddRecExpr(Operands, L); 2049 } 2050 2051 Operands.push_back(Step); 2052 return getAddRecExpr(Operands, L, HasNUW, HasNSW); 2053 } 2054 2055 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 2056 /// Simplify the expression as much as possible. 2057 const SCEV * 2058 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 2059 const Loop *L, 2060 bool HasNUW, bool HasNSW) { 2061 if (Operands.size() == 1) return Operands[0]; 2062 #ifndef NDEBUG 2063 const Type *ETy = getEffectiveSCEVType(Operands[0]->getType()); 2064 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 2065 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy && 2066 "SCEVAddRecExpr operand types don't match!"); 2067 #endif 2068 2069 if (Operands.back()->isZero()) { 2070 Operands.pop_back(); 2071 return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X 2072 } 2073 2074 // It's tempting to want to call getMaxBackedgeTakenCount count here and 2075 // use that information to infer NUW and NSW flags. 
However, computing a 2076 // BE count requires calling getAddRecExpr, so we may not yet have a 2077 // meaningful BE count at this point (and if we don't, we'd be stuck 2078 // with a SCEVCouldNotCompute as the cached BE count). 2079 2080 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 2081 if (!HasNUW && HasNSW) { 2082 bool All = true; 2083 for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(), 2084 E = Operands.end(); I != E; ++I) 2085 if (!isKnownNonNegative(*I)) { 2086 All = false; 2087 break; 2088 } 2089 if (All) HasNUW = true; 2090 } 2091 2092 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 2093 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 2094 const Loop *NestedLoop = NestedAR->getLoop(); 2095 if (L->contains(NestedLoop) ? 2096 (L->getLoopDepth() < NestedLoop->getLoopDepth()) : 2097 (!NestedLoop->contains(L) && 2098 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { 2099 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 2100 NestedAR->op_end()); 2101 Operands[0] = NestedAR->getStart(); 2102 // AddRecs require their operands be loop-invariant with respect to their 2103 // loops. Don't perform this transformation if it would break this 2104 // requirement. 2105 bool AllInvariant = true; 2106 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2107 if (!Operands[i]->isLoopInvariant(L)) { 2108 AllInvariant = false; 2109 break; 2110 } 2111 if (AllInvariant) { 2112 NestedOperands[0] = getAddRecExpr(Operands, L); 2113 AllInvariant = true; 2114 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) 2115 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) { 2116 AllInvariant = false; 2117 break; 2118 } 2119 if (AllInvariant) 2120 // Ok, both add recurrences are valid after the transformation. 2121 return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW); 2122 } 2123 // Reset Operands to its original state. 2124 Operands[0] = NestedAR; 2125 } 2126 } 2127 2128 // Okay, it looks like we really DO need an addrec expr. Check to see if we 2129 // already have one, otherwise create a new one. 
2130 FoldingSetNodeID ID; 2131 ID.AddInteger(scAddRecExpr); 2132 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2133 ID.AddPointer(Operands[i]); 2134 ID.AddPointer(L); 2135 void *IP = 0; 2136 SCEVAddRecExpr *S = 2137 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2138 if (!S) { 2139 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size()); 2140 std::uninitialized_copy(Operands.begin(), Operands.end(), O); 2141 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator), 2142 O, Operands.size(), L); 2143 UniqueSCEVs.InsertNode(S, IP); 2144 } 2145 if (HasNUW) S->setHasNoUnsignedWrap(true); 2146 if (HasNSW) S->setHasNoSignedWrap(true); 2147 return S; 2148 } 2149 2150 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 2151 const SCEV *RHS) { 2152 SmallVector<const SCEV *, 2> Ops; 2153 Ops.push_back(LHS); 2154 Ops.push_back(RHS); 2155 return getSMaxExpr(Ops); 2156 } 2157 2158 const SCEV * 2159 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2160 assert(!Ops.empty() && "Cannot get empty smax!"); 2161 if (Ops.size() == 1) return Ops[0]; 2162 #ifndef NDEBUG 2163 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2164 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2165 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2166 "SCEVSMaxExpr operand types don't match!"); 2167 #endif 2168 2169 // Sort by complexity, this groups all similar expression types together. 2170 GroupByComplexity(Ops, LI); 2171 2172 // If there are any constants, fold them together. 2173 unsigned Idx = 0; 2174 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2175 ++Idx; 2176 assert(Idx < Ops.size()); 2177 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2178 // We found two constants, fold them together! 2179 ConstantInt *Fold = ConstantInt::get(getContext(), 2180 APIntOps::smax(LHSC->getValue()->getValue(), 2181 RHSC->getValue()->getValue())); 2182 Ops[0] = getConstant(Fold); 2183 Ops.erase(Ops.begin()+1); // Erase the folded element 2184 if (Ops.size() == 1) return Ops[0]; 2185 LHSC = cast<SCEVConstant>(Ops[0]); 2186 } 2187 2188 // If we are left with a constant minimum-int, strip it off. 2189 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 2190 Ops.erase(Ops.begin()); 2191 --Idx; 2192 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 2193 // If we have an smax with a constant maximum-int, it will always be 2194 // maximum-int. 2195 return Ops[0]; 2196 } 2197 2198 if (Ops.size() == 1) return Ops[0]; 2199 } 2200 2201 // Find the first SMax 2202 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 2203 ++Idx; 2204 2205 // Check to see if one of the operands is an SMax. If so, expand its operands 2206 // onto our operand list, and recurse to simplify. 2207 if (Idx < Ops.size()) { 2208 bool DeletedSMax = false; 2209 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 2210 Ops.erase(Ops.begin()+Idx); 2211 Ops.append(SMax->op_begin(), SMax->op_end()); 2212 DeletedSMax = true; 2213 } 2214 2215 if (DeletedSMax) 2216 return getSMaxExpr(Ops); 2217 } 2218 2219 // Okay, check to see if the same value occurs in the operand list twice. If 2220 // so, delete one. Since we sorted the list, these values are required to 2221 // be adjacent. 
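// For example (illustrative): smax(X, Y, Y) simplifies below to smax(X, Y),
// and smax(X, Y) simplifies to just X when X is known to be signed
// greater-than-or-equal to Y.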
2222 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2223 // X smax Y smax Y --> X smax Y 2224 // X smax Y --> X, if X is always greater than Y 2225 if (Ops[i] == Ops[i+1] || 2226 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) { 2227 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2228 --i; --e; 2229 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) { 2230 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2231 --i; --e; 2232 } 2233 2234 if (Ops.size() == 1) return Ops[0]; 2235 2236 assert(!Ops.empty() && "Reduced smax down to nothing!"); 2237 2238 // Okay, it looks like we really DO need an smax expr. Check to see if we 2239 // already have one, otherwise create a new one. 2240 FoldingSetNodeID ID; 2241 ID.AddInteger(scSMaxExpr); 2242 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2243 ID.AddPointer(Ops[i]); 2244 void *IP = 0; 2245 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2246 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2247 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2248 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator), 2249 O, Ops.size()); 2250 UniqueSCEVs.InsertNode(S, IP); 2251 return S; 2252 } 2253 2254 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 2255 const SCEV *RHS) { 2256 SmallVector<const SCEV *, 2> Ops; 2257 Ops.push_back(LHS); 2258 Ops.push_back(RHS); 2259 return getUMaxExpr(Ops); 2260 } 2261 2262 const SCEV * 2263 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2264 assert(!Ops.empty() && "Cannot get empty umax!"); 2265 if (Ops.size() == 1) return Ops[0]; 2266 #ifndef NDEBUG 2267 const Type *ETy = getEffectiveSCEVType(Ops[0]->getType()); 2268 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2269 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy && 2270 "SCEVUMaxExpr operand types don't match!"); 2271 #endif 2272 2273 // Sort by complexity, this groups all similar expression types together. 2274 GroupByComplexity(Ops, LI); 2275 2276 // If there are any constants, fold them together. 2277 unsigned Idx = 0; 2278 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2279 ++Idx; 2280 assert(Idx < Ops.size()); 2281 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2282 // We found two constants, fold them together! 2283 ConstantInt *Fold = ConstantInt::get(getContext(), 2284 APIntOps::umax(LHSC->getValue()->getValue(), 2285 RHSC->getValue()->getValue())); 2286 Ops[0] = getConstant(Fold); 2287 Ops.erase(Ops.begin()+1); // Erase the folded element 2288 if (Ops.size() == 1) return Ops[0]; 2289 LHSC = cast<SCEVConstant>(Ops[0]); 2290 } 2291 2292 // If we are left with a constant minimum-int, strip it off. 2293 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 2294 Ops.erase(Ops.begin()); 2295 --Idx; 2296 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 2297 // If we have an umax with a constant maximum-int, it will always be 2298 // maximum-int. 2299 return Ops[0]; 2300 } 2301 2302 if (Ops.size() == 1) return Ops[0]; 2303 } 2304 2305 // Find the first UMax 2306 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 2307 ++Idx; 2308 2309 // Check to see if one of the operands is a UMax. If so, expand its operands 2310 // onto our operand list, and recurse to simplify. 
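// For example (illustrative): umax(A, umax(B, C)) is flattened below into
// umax(A, B, C) before the duplicate and known-ordering folds are applied.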
2311 if (Idx < Ops.size()) { 2312 bool DeletedUMax = false; 2313 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 2314 Ops.erase(Ops.begin()+Idx); 2315 Ops.append(UMax->op_begin(), UMax->op_end()); 2316 DeletedUMax = true; 2317 } 2318 2319 if (DeletedUMax) 2320 return getUMaxExpr(Ops); 2321 } 2322 2323 // Okay, check to see if the same value occurs in the operand list twice. If 2324 // so, delete one. Since we sorted the list, these values are required to 2325 // be adjacent. 2326 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2327 // X umax Y umax Y --> X umax Y 2328 // X umax Y --> X, if X is always greater than Y 2329 if (Ops[i] == Ops[i+1] || 2330 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) { 2331 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2); 2332 --i; --e; 2333 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) { 2334 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2335 --i; --e; 2336 } 2337 2338 if (Ops.size() == 1) return Ops[0]; 2339 2340 assert(!Ops.empty() && "Reduced umax down to nothing!"); 2341 2342 // Okay, it looks like we really DO need a umax expr. Check to see if we 2343 // already have one, otherwise create a new one. 2344 FoldingSetNodeID ID; 2345 ID.AddInteger(scUMaxExpr); 2346 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2347 ID.AddPointer(Ops[i]); 2348 void *IP = 0; 2349 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2350 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size()); 2351 std::uninitialized_copy(Ops.begin(), Ops.end(), O); 2352 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator), 2353 O, Ops.size()); 2354 UniqueSCEVs.InsertNode(S, IP); 2355 return S; 2356 } 2357 2358 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 2359 const SCEV *RHS) { 2360 // ~smax(~x, ~y) == smin(x, y). 2361 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2362 } 2363 2364 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 2365 const SCEV *RHS) { 2366 // ~umax(~x, ~y) == umin(x, y) 2367 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2368 } 2369 2370 const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) { 2371 // If we have TargetData, we can bypass creating a target-independent 2372 // constant expression and then folding it back into a ConstantInt. 2373 // This is just a compile-time optimization. 2374 if (TD) 2375 return getConstant(TD->getIntPtrType(getContext()), 2376 TD->getTypeAllocSize(AllocTy)); 2377 2378 Constant *C = ConstantExpr::getSizeOf(AllocTy); 2379 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2380 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2381 C = Folded; 2382 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2383 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2384 } 2385 2386 const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) { 2387 Constant *C = ConstantExpr::getAlignOf(AllocTy); 2388 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2389 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2390 C = Folded; 2391 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2392 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2393 } 2394 2395 const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy, 2396 unsigned FieldNo) { 2397 // If we have TargetData, we can bypass creating a target-independent 2398 // constant expression and then folding it back into a ConstantInt. 
2399 // This is just a compile-time optimization. 2400 if (TD) 2401 return getConstant(TD->getIntPtrType(getContext()), 2402 TD->getStructLayout(STy)->getElementOffset(FieldNo)); 2403 2404 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo); 2405 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2406 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2407 C = Folded; 2408 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy)); 2409 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2410 } 2411 2412 const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy, 2413 Constant *FieldNo) { 2414 Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo); 2415 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2416 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD)) 2417 C = Folded; 2418 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy)); 2419 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2420 } 2421 2422 const SCEV *ScalarEvolution::getUnknown(Value *V) { 2423 // Don't attempt to do anything other than create a SCEVUnknown object 2424 // here. createSCEV only calls getUnknown after checking for all other 2425 // interesting possibilities, and any other code that calls getUnknown 2426 // is doing so in order to hide a value from SCEV canonicalization. 2427 2428 FoldingSetNodeID ID; 2429 ID.AddInteger(scUnknown); 2430 ID.AddPointer(V); 2431 void *IP = 0; 2432 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) { 2433 assert(cast<SCEVUnknown>(S)->getValue() == V && 2434 "Stale SCEVUnknown in uniquing map!"); 2435 return S; 2436 } 2437 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this, 2438 FirstUnknown); 2439 FirstUnknown = cast<SCEVUnknown>(S); 2440 UniqueSCEVs.InsertNode(S, IP); 2441 return S; 2442 } 2443 2444 //===----------------------------------------------------------------------===// 2445 // Basic SCEV Analysis and PHI Idiom Recognition Code 2446 // 2447 2448 /// isSCEVable - Test if values of the given type are analyzable within 2449 /// the SCEV framework. This primarily includes integer types, and it 2450 /// can optionally include pointer types if the ScalarEvolution class 2451 /// has access to target-specific information. 2452 bool ScalarEvolution::isSCEVable(const Type *Ty) const { 2453 // Integers and pointers are always SCEVable. 2454 return Ty->isIntegerTy() || Ty->isPointerTy(); 2455 } 2456 2457 /// getTypeSizeInBits - Return the size in bits of the specified type, 2458 /// for which isSCEVable must return true. 2459 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { 2460 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2461 2462 // If we have a TargetData, use it! 2463 if (TD) 2464 return TD->getTypeSizeInBits(Ty); 2465 2466 // Integer types have fixed sizes. 2467 if (Ty->isIntegerTy()) 2468 return Ty->getPrimitiveSizeInBits(); 2469 2470 // The only other support type is pointer. Without TargetData, conservatively 2471 // assume pointers are 64-bit. 2472 assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); 2473 return 64; 2474 } 2475 2476 /// getEffectiveSCEVType - Return a type with the same bitwidth as 2477 /// the given type and which represents how SCEV will treat the given 2478 /// type, for which isSCEVable must return true. For pointer types, 2479 /// this is the pointer-sized integer type. 
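/// For example (illustrative): an i32 maps to itself, while an i8* maps to
/// the pointer-sized integer type (i64 when no TargetData is available, per
/// the conservative assumption below).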
2480 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { 2481 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2482 2483 if (Ty->isIntegerTy()) 2484 return Ty; 2485 2486 // The only other support type is pointer. 2487 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); 2488 if (TD) return TD->getIntPtrType(getContext()); 2489 2490 // Without TargetData, conservatively assume pointers are 64-bit. 2491 return Type::getInt64Ty(getContext()); 2492 } 2493 2494 const SCEV *ScalarEvolution::getCouldNotCompute() { 2495 return &CouldNotCompute; 2496 } 2497 2498 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the 2499 /// expression and create a new one. 2500 const SCEV *ScalarEvolution::getSCEV(Value *V) { 2501 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 2502 2503 ValueExprMapType::const_iterator I = ValueExprMap.find(V); 2504 if (I != ValueExprMap.end()) return I->second; 2505 const SCEV *S = createSCEV(V); 2506 2507 // The process of creating a SCEV for V may have caused other SCEVs 2508 // to have been created, so it's necessary to insert the new entry 2509 // from scratch, rather than trying to remember the insert position 2510 // above. 2511 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S)); 2512 return S; 2513 } 2514 2515 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V 2516 /// 2517 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) { 2518 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2519 return getConstant( 2520 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 2521 2522 const Type *Ty = V->getType(); 2523 Ty = getEffectiveSCEVType(Ty); 2524 return getMulExpr(V, 2525 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)))); 2526 } 2527 2528 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V 2529 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 2530 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2531 return getConstant( 2532 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 2533 2534 const Type *Ty = V->getType(); 2535 Ty = getEffectiveSCEVType(Ty); 2536 const SCEV *AllOnes = 2537 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 2538 return getMinusSCEV(AllOnes, V); 2539 } 2540 2541 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. 2542 /// 2543 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, 2544 const SCEV *RHS) { 2545 // Fast path: X - X --> 0. 2546 if (LHS == RHS) 2547 return getConstant(LHS->getType(), 0); 2548 2549 // X - Y --> X + -Y 2550 return getAddExpr(LHS, getNegativeSCEV(RHS)); 2551 } 2552 2553 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the 2554 /// input value to the specified type. If the type must be extended, it is zero 2555 /// extended. 2556 const SCEV * 2557 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, 2558 const Type *Ty) { 2559 const Type *SrcTy = V->getType(); 2560 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2561 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2562 "Cannot truncate or zero extend with non-integer arguments!"); 2563 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2564 return V; // No conversion 2565 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2566 return getTruncateExpr(V, Ty); 2567 return getZeroExtendExpr(V, Ty); 2568 } 2569 2570 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the 2571 /// input value to the specified type. 
If the type must be extended, it is sign 2572 /// extended. 2573 const SCEV * 2574 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 2575 const Type *Ty) { 2576 const Type *SrcTy = V->getType(); 2577 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2578 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2579 "Cannot truncate or zero extend with non-integer arguments!"); 2580 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2581 return V; // No conversion 2582 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2583 return getTruncateExpr(V, Ty); 2584 return getSignExtendExpr(V, Ty); 2585 } 2586 2587 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the 2588 /// input value to the specified type. If the type must be extended, it is zero 2589 /// extended. The conversion must not be narrowing. 2590 const SCEV * 2591 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { 2592 const Type *SrcTy = V->getType(); 2593 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2594 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2595 "Cannot noop or zero extend with non-integer arguments!"); 2596 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2597 "getNoopOrZeroExtend cannot truncate!"); 2598 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2599 return V; // No conversion 2600 return getZeroExtendExpr(V, Ty); 2601 } 2602 2603 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the 2604 /// input value to the specified type. If the type must be extended, it is sign 2605 /// extended. The conversion must not be narrowing. 2606 const SCEV * 2607 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { 2608 const Type *SrcTy = V->getType(); 2609 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2610 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2611 "Cannot noop or sign extend with non-integer arguments!"); 2612 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2613 "getNoopOrSignExtend cannot truncate!"); 2614 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2615 return V; // No conversion 2616 return getSignExtendExpr(V, Ty); 2617 } 2618 2619 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of 2620 /// the input value to the specified type. If the type must be extended, 2621 /// it is extended with unspecified bits. The conversion must not be 2622 /// narrowing. 2623 const SCEV * 2624 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { 2625 const Type *SrcTy = V->getType(); 2626 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2627 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2628 "Cannot noop or any extend with non-integer arguments!"); 2629 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2630 "getNoopOrAnyExtend cannot truncate!"); 2631 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2632 return V; // No conversion 2633 return getAnyExtendExpr(V, Ty); 2634 } 2635 2636 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the 2637 /// input value to the specified type. The conversion must not be widening. 
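/// For example (illustrative): converting an i64 expression to i32 yields a
/// truncate, converting i64 to i64 returns the expression unchanged, and a
/// widening request is rejected by the cannot-extend assertion.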
2638 const SCEV * 2639 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) { 2640 const Type *SrcTy = V->getType(); 2641 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 2642 (Ty->isIntegerTy() || Ty->isPointerTy()) && 2643 "Cannot truncate or noop with non-integer arguments!"); 2644 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 2645 "getTruncateOrNoop cannot extend!"); 2646 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2647 return V; // No conversion 2648 return getTruncateExpr(V, Ty); 2649 } 2650 2651 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of 2652 /// the types using zero-extension, and then perform a umax operation 2653 /// with them. 2654 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 2655 const SCEV *RHS) { 2656 const SCEV *PromotedLHS = LHS; 2657 const SCEV *PromotedRHS = RHS; 2658 2659 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2660 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2661 else 2662 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2663 2664 return getUMaxExpr(PromotedLHS, PromotedRHS); 2665 } 2666 2667 /// getUMinFromMismatchedTypes - Promote the operands to the wider of 2668 /// the types using zero-extension, and then perform a umin operation 2669 /// with them. 2670 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 2671 const SCEV *RHS) { 2672 const SCEV *PromotedLHS = LHS; 2673 const SCEV *PromotedRHS = RHS; 2674 2675 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2676 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2677 else 2678 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2679 2680 return getUMinExpr(PromotedLHS, PromotedRHS); 2681 } 2682 2683 /// PushDefUseChildren - Push users of the given Instruction 2684 /// onto the given Worklist. 2685 static void 2686 PushDefUseChildren(Instruction *I, 2687 SmallVectorImpl<Instruction *> &Worklist) { 2688 // Push the def-use children onto the Worklist stack. 2689 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); 2690 UI != UE; ++UI) 2691 Worklist.push_back(cast<Instruction>(*UI)); 2692 } 2693 2694 /// ForgetSymbolicValue - This looks up computed SCEV values for all 2695 /// instructions that depend on the given instruction and removes them from 2696 /// the ValueExprMapType map if they reference SymName. This is used during PHI 2697 /// resolution. 2698 void 2699 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { 2700 SmallVector<Instruction *, 16> Worklist; 2701 PushDefUseChildren(PN, Worklist); 2702 2703 SmallPtrSet<Instruction *, 8> Visited; 2704 Visited.insert(PN); 2705 while (!Worklist.empty()) { 2706 Instruction *I = Worklist.pop_back_val(); 2707 if (!Visited.insert(I)) continue; 2708 2709 ValueExprMapType::iterator It = 2710 ValueExprMap.find(static_cast<Value *>(I)); 2711 if (It != ValueExprMap.end()) { 2712 // Short-circuit the def-use traversal if the symbolic name 2713 // ceases to appear in expressions. 2714 if (It->second != SymName && !It->second->hasOperand(SymName)) 2715 continue; 2716 2717 // SCEVUnknown for a PHI either means that it has an unrecognized 2718 // structure, it's a PHI that's in the progress of being computed 2719 // by createNodeForPHI, or it's a single-value PHI. In the first case, 2720 // additional loop trip count information isn't going to change anything. 
2721 // In the second case, createNodeForPHI will perform the necessary 2722 // updates on its own when it gets to that point. In the third, we do 2723 // want to forget the SCEVUnknown. 2724 if (!isa<PHINode>(I) || 2725 !isa<SCEVUnknown>(It->second) || 2726 (I != PN && It->second == SymName)) { 2727 ValuesAtScopes.erase(It->second); 2728 ValueExprMap.erase(It); 2729 } 2730 } 2731 2732 PushDefUseChildren(I, Worklist); 2733 } 2734 } 2735 2736 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in 2737 /// a loop header, making it a potential recurrence, or it doesn't. 2738 /// 2739 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 2740 if (const Loop *L = LI->getLoopFor(PN->getParent())) 2741 if (L->getHeader() == PN->getParent()) { 2742 // The loop may have multiple entrances or multiple exits; we can analyze 2743 // this phi as an addrec if it has a unique entry value and a unique 2744 // backedge value. 2745 Value *BEValueV = 0, *StartValueV = 0; 2746 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 2747 Value *V = PN->getIncomingValue(i); 2748 if (L->contains(PN->getIncomingBlock(i))) { 2749 if (!BEValueV) { 2750 BEValueV = V; 2751 } else if (BEValueV != V) { 2752 BEValueV = 0; 2753 break; 2754 } 2755 } else if (!StartValueV) { 2756 StartValueV = V; 2757 } else if (StartValueV != V) { 2758 StartValueV = 0; 2759 break; 2760 } 2761 } 2762 if (BEValueV && StartValueV) { 2763 // While we are analyzing this PHI node, handle its value symbolically. 2764 const SCEV *SymbolicName = getUnknown(PN); 2765 assert(ValueExprMap.find(PN) == ValueExprMap.end() && 2766 "PHI node already processed?"); 2767 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 2768 2769 // Using this symbolic name for the PHI, analyze the value coming around 2770 // the back-edge. 2771 const SCEV *BEValue = getSCEV(BEValueV); 2772 2773 // NOTE: If BEValue is loop invariant, we know that the PHI node just 2774 // has a special value for the first iteration of the loop. 2775 2776 // If the value coming around the backedge is an add with the symbolic 2777 // value we just inserted, then we found a simple induction variable! 2778 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 2779 // If there is a single occurrence of the symbolic value, replace it 2780 // with a recurrence. 2781 unsigned FoundIndex = Add->getNumOperands(); 2782 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2783 if (Add->getOperand(i) == SymbolicName) 2784 if (FoundIndex == e) { 2785 FoundIndex = i; 2786 break; 2787 } 2788 2789 if (FoundIndex != Add->getNumOperands()) { 2790 // Create an add with everything but the specified operand. 2791 SmallVector<const SCEV *, 8> Ops; 2792 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2793 if (i != FoundIndex) 2794 Ops.push_back(Add->getOperand(i)); 2795 const SCEV *Accum = getAddExpr(Ops); 2796 2797 // This is not a valid addrec if the step amount is varying each 2798 // loop iteration, but is not itself an addrec in this loop. 2799 if (Accum->isLoopInvariant(L) || 2800 (isa<SCEVAddRecExpr>(Accum) && 2801 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 2802 bool HasNUW = false; 2803 bool HasNSW = false; 2804 2805 // If the increment doesn't overflow, then neither the addrec nor 2806 // the post-increment will overflow. 
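// For example (illustrative): if the back-edge value is produced by an IR
// 'add nuw nsw %i, 1' starting from 0, the flags are carried over below and
// the resulting addrec {0,+,1} is marked no-unsigned-wrap and
// no-signed-wrap as well.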
2807 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { 2808 if (OBO->hasNoUnsignedWrap()) 2809 HasNUW = true; 2810 if (OBO->hasNoSignedWrap()) 2811 HasNSW = true; 2812 } 2813 2814 const SCEV *StartVal = getSCEV(StartValueV); 2815 const SCEV *PHISCEV = 2816 getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW); 2817 2818 // Since the no-wrap flags are on the increment, they apply to the 2819 // post-incremented value as well. 2820 if (Accum->isLoopInvariant(L)) 2821 (void)getAddRecExpr(getAddExpr(StartVal, Accum), 2822 Accum, L, HasNUW, HasNSW); 2823 2824 // Okay, for the entire analysis of this edge we assumed the PHI 2825 // to be symbolic. We now need to go back and purge all of the 2826 // entries for the scalars that use the symbolic expression. 2827 ForgetSymbolicName(PN, SymbolicName); 2828 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 2829 return PHISCEV; 2830 } 2831 } 2832 } else if (const SCEVAddRecExpr *AddRec = 2833 dyn_cast<SCEVAddRecExpr>(BEValue)) { 2834 // Otherwise, this could be a loop like this: 2835 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 2836 // In this case, j = {1,+,1} and BEValue is j. 2837 // Because the other in-value of i (0) fits the evolution of BEValue 2838 // i really is an addrec evolution. 2839 if (AddRec->getLoop() == L && AddRec->isAffine()) { 2840 const SCEV *StartVal = getSCEV(StartValueV); 2841 2842 // If StartVal = j.start - j.stride, we can use StartVal as the 2843 // initial step of the addrec evolution. 2844 if (StartVal == getMinusSCEV(AddRec->getOperand(0), 2845 AddRec->getOperand(1))) { 2846 const SCEV *PHISCEV = 2847 getAddRecExpr(StartVal, AddRec->getOperand(1), L); 2848 2849 // Okay, for the entire analysis of this edge we assumed the PHI 2850 // to be symbolic. We now need to go back and purge all of the 2851 // entries for the scalars that use the symbolic expression. 2852 ForgetSymbolicName(PN, SymbolicName); 2853 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; 2854 return PHISCEV; 2855 } 2856 } 2857 } 2858 } 2859 } 2860 2861 // If the PHI has a single incoming value, follow that value, unless the 2862 // PHI's incoming blocks are in a different loop, in which case doing so 2863 // risks breaking LCSSA form. Instcombine would normally zap these, but 2864 // it doesn't have DominatorTree information, so it may miss cases. 2865 if (Value *V = PN->hasConstantValue(DT)) { 2866 bool AllSameLoop = true; 2867 Loop *PNLoop = LI->getLoopFor(PN->getParent()); 2868 for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 2869 if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) { 2870 AllSameLoop = false; 2871 break; 2872 } 2873 if (AllSameLoop) 2874 return getSCEV(V); 2875 } 2876 2877 // If it's not a loop phi, we can't handle it yet. 2878 return getUnknown(PN); 2879 } 2880 2881 /// createNodeForGEP - Expand GEP instructions into add and multiply 2882 /// operations. This allows them to be analyzed by regular SCEV code. 2883 /// 2884 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 2885 2886 // Don't blindly transfer the inbounds flag from the GEP instruction to the 2887 // Add expression, because the Instruction may be guarded by control flow 2888 // and the no-overflow bits may not be valid for the expression in any 2889 // context. 2890 2891 const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); 2892 Value *Base = GEP->getOperand(0); 2893 // Don't attempt to analyze GEPs over unsized objects. 
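// For example (illustrative, C-style notation): a GEP computing &P[i].f is
// decomposed by the loop below into P + sext(i)*sizeof(*P) + offsetof(f):
// sequential indices are sign-extended and scaled by the element size,
// while struct field indices add their constant field offset.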
2894 if (!cast<PointerType>(Base->getType())->getElementType()->isSized()) 2895 return getUnknown(GEP); 2896 const SCEV *TotalOffset = getConstant(IntPtrTy, 0); 2897 gep_type_iterator GTI = gep_type_begin(GEP); 2898 for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()), 2899 E = GEP->op_end(); 2900 I != E; ++I) { 2901 Value *Index = *I; 2902 // Compute the (potentially symbolic) offset in bytes for this index. 2903 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) { 2904 // For a struct, add the member offset. 2905 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 2906 const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo); 2907 2908 // Add the field offset to the running total offset. 2909 TotalOffset = getAddExpr(TotalOffset, FieldOffset); 2910 } else { 2911 // For an array, add the element offset, explicitly scaled. 2912 const SCEV *ElementSize = getSizeOfExpr(*GTI); 2913 const SCEV *IndexS = getSCEV(Index); 2914 // Getelementptr indices are signed. 2915 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy); 2916 2917 // Multiply the index by the element size to compute the element offset. 2918 const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize); 2919 2920 // Add the element offset to the running total offset. 2921 TotalOffset = getAddExpr(TotalOffset, LocalOffset); 2922 } 2923 } 2924 2925 // Get the SCEV for the GEP base. 2926 const SCEV *BaseS = getSCEV(Base); 2927 2928 // Add the total offset from all the GEP indices to the base. 2929 return getAddExpr(BaseS, TotalOffset); 2930 } 2931 2932 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is 2933 /// guaranteed to end in (at every loop iteration). It is, at the same time, 2934 /// the minimum number of times S is divisible by 2. For example, given {4,+,8} 2935 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. 2936 uint32_t 2937 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 2938 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2939 return C->getValue()->getValue().countTrailingZeros(); 2940 2941 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 2942 return std::min(GetMinTrailingZeros(T->getOperand()), 2943 (uint32_t)getTypeSizeInBits(T->getType())); 2944 2945 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 2946 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2947 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2948 getTypeSizeInBits(E->getType()) : OpRes; 2949 } 2950 2951 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 2952 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2953 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2954 getTypeSizeInBits(E->getType()) : OpRes; 2955 } 2956 2957 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 2958 // The result is the min of all operands results. 2959 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2960 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2961 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2962 return MinOpRes; 2963 } 2964 2965 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 2966 // The result is the sum of all operands results. 
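// For example (illustrative): 12 * X has at least 2 + TZ(X) trailing zero
// bits, because multiplying by a value divisible by 4 contributes its two
// trailing zeros (the sum is capped at the bitwidth below).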
2967 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 2968 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 2969 for (unsigned i = 1, e = M->getNumOperands(); 2970 SumOpRes != BitWidth && i != e; ++i) 2971 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), 2972 BitWidth); 2973 return SumOpRes; 2974 } 2975 2976 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 2977 // The result is the min of all operands results. 2978 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2979 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2980 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2981 return MinOpRes; 2982 } 2983 2984 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 2985 // The result is the min of all operands results. 2986 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2987 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2988 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2989 return MinOpRes; 2990 } 2991 2992 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 2993 // The result is the min of all operands results. 2994 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2995 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2996 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2997 return MinOpRes; 2998 } 2999 3000 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3001 // For a SCEVUnknown, ask ValueTracking. 3002 unsigned BitWidth = getTypeSizeInBits(U->getType()); 3003 APInt Mask = APInt::getAllOnesValue(BitWidth); 3004 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 3005 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 3006 return Zeros.countTrailingOnes(); 3007 } 3008 3009 // SCEVUDivExpr 3010 return 0; 3011 } 3012 3013 /// getUnsignedRange - Determine the unsigned range for a particular SCEV. 3014 /// 3015 ConstantRange 3016 ScalarEvolution::getUnsignedRange(const SCEV *S) { 3017 3018 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 3019 return ConstantRange(C->getValue()->getValue()); 3020 3021 unsigned BitWidth = getTypeSizeInBits(S->getType()); 3022 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 3023 3024 // If the value has known zeros, the maximum unsigned value will have those 3025 // known zeros as well. 
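// For example, if S is known to be a multiple of 8 in an 8-bit type, the
// largest value it can take is 0b11111000, so the conservative range below
// becomes [0, 0b11111000 + 1).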
3026 uint32_t TZ = GetMinTrailingZeros(S); 3027 if (TZ != 0) 3028 ConservativeResult = 3029 ConstantRange(APInt::getMinValue(BitWidth), 3030 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 3031 3032 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3033 ConstantRange X = getUnsignedRange(Add->getOperand(0)); 3034 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 3035 X = X.add(getUnsignedRange(Add->getOperand(i))); 3036 return ConservativeResult.intersectWith(X); 3037 } 3038 3039 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3040 ConstantRange X = getUnsignedRange(Mul->getOperand(0)); 3041 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 3042 X = X.multiply(getUnsignedRange(Mul->getOperand(i))); 3043 return ConservativeResult.intersectWith(X); 3044 } 3045 3046 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 3047 ConstantRange X = getUnsignedRange(SMax->getOperand(0)); 3048 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 3049 X = X.smax(getUnsignedRange(SMax->getOperand(i))); 3050 return ConservativeResult.intersectWith(X); 3051 } 3052 3053 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 3054 ConstantRange X = getUnsignedRange(UMax->getOperand(0)); 3055 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 3056 X = X.umax(getUnsignedRange(UMax->getOperand(i))); 3057 return ConservativeResult.intersectWith(X); 3058 } 3059 3060 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 3061 ConstantRange X = getUnsignedRange(UDiv->getLHS()); 3062 ConstantRange Y = getUnsignedRange(UDiv->getRHS()); 3063 return ConservativeResult.intersectWith(X.udiv(Y)); 3064 } 3065 3066 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 3067 ConstantRange X = getUnsignedRange(ZExt->getOperand()); 3068 return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); 3069 } 3070 3071 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 3072 ConstantRange X = getUnsignedRange(SExt->getOperand()); 3073 return ConservativeResult.intersectWith(X.signExtend(BitWidth)); 3074 } 3075 3076 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3077 ConstantRange X = getUnsignedRange(Trunc->getOperand()); 3078 return ConservativeResult.intersectWith(X.truncate(BitWidth)); 3079 } 3080 3081 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3082 // If there's no unsigned wrap, the value will never be less than its 3083 // initial value. 3084 if (AddRec->hasNoUnsignedWrap()) 3085 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 3086 if (!C->getValue()->isZero()) 3087 ConservativeResult = 3088 ConservativeResult.intersectWith( 3089 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0))); 3090 3091 // TODO: non-affine addrec 3092 if (AddRec->isAffine()) { 3093 const Type *Ty = AddRec->getType(); 3094 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3095 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3096 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3097 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3098 3099 const SCEV *Start = AddRec->getStart(); 3100 const SCEV *Step = AddRec->getStepRecurrence(*this); 3101 3102 ConstantRange StartRange = getUnsignedRange(Start); 3103 ConstantRange StepRange = getSignedRange(Step); 3104 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3105 ConstantRange EndRange = 3106 StartRange.add(MaxBECountRange.multiply(StepRange)); 3107 3108 // Check for overflow. 
This must be done with ConstantRange arithmetic 3109 // because we could be called from within the ScalarEvolution overflow 3110 // checking code. 3111 ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1); 3112 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3113 ConstantRange ExtMaxBECountRange = 3114 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3115 ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1); 3116 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3117 ExtEndRange) 3118 return ConservativeResult; 3119 3120 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), 3121 EndRange.getUnsignedMin()); 3122 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), 3123 EndRange.getUnsignedMax()); 3124 if (Min.isMinValue() && Max.isMaxValue()) 3125 return ConservativeResult; 3126 return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); 3127 } 3128 } 3129 3130 return ConservativeResult; 3131 } 3132 3133 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3134 // For a SCEVUnknown, ask ValueTracking. 3135 APInt Mask = APInt::getAllOnesValue(BitWidth); 3136 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 3137 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 3138 if (Ones == ~Zeros + 1) 3139 return ConservativeResult; 3140 return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); 3141 } 3142 3143 return ConservativeResult; 3144 } 3145 3146 /// getSignedRange - Determine the signed range for a particular SCEV. 3147 /// 3148 ConstantRange 3149 ScalarEvolution::getSignedRange(const SCEV *S) { 3150 3151 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 3152 return ConstantRange(C->getValue()->getValue()); 3153 3154 unsigned BitWidth = getTypeSizeInBits(S->getType()); 3155 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 3156 3157 // If the value has known zeros, the maximum signed value will have those 3158 // known zeros as well. 
3159 uint32_t TZ = GetMinTrailingZeros(S); 3160 if (TZ != 0) 3161 ConservativeResult = 3162 ConstantRange(APInt::getSignedMinValue(BitWidth), 3163 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 3164 3165 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 3166 ConstantRange X = getSignedRange(Add->getOperand(0)); 3167 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 3168 X = X.add(getSignedRange(Add->getOperand(i))); 3169 return ConservativeResult.intersectWith(X); 3170 } 3171 3172 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3173 ConstantRange X = getSignedRange(Mul->getOperand(0)); 3174 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 3175 X = X.multiply(getSignedRange(Mul->getOperand(i))); 3176 return ConservativeResult.intersectWith(X); 3177 } 3178 3179 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 3180 ConstantRange X = getSignedRange(SMax->getOperand(0)); 3181 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 3182 X = X.smax(getSignedRange(SMax->getOperand(i))); 3183 return ConservativeResult.intersectWith(X); 3184 } 3185 3186 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 3187 ConstantRange X = getSignedRange(UMax->getOperand(0)); 3188 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 3189 X = X.umax(getSignedRange(UMax->getOperand(i))); 3190 return ConservativeResult.intersectWith(X); 3191 } 3192 3193 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 3194 ConstantRange X = getSignedRange(UDiv->getLHS()); 3195 ConstantRange Y = getSignedRange(UDiv->getRHS()); 3196 return ConservativeResult.intersectWith(X.udiv(Y)); 3197 } 3198 3199 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 3200 ConstantRange X = getSignedRange(ZExt->getOperand()); 3201 return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); 3202 } 3203 3204 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 3205 ConstantRange X = getSignedRange(SExt->getOperand()); 3206 return ConservativeResult.intersectWith(X.signExtend(BitWidth)); 3207 } 3208 3209 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3210 ConstantRange X = getSignedRange(Trunc->getOperand()); 3211 return ConservativeResult.intersectWith(X.truncate(BitWidth)); 3212 } 3213 3214 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3215 // If there's no signed wrap, and all the operands have the same sign or 3216 // zero, the value won't ever change sign. 
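// For example, {1,+,2}<nsw> only takes the values 1, 3, 5, ..., so its signed
// range can be intersected with the non-negative half of the space; the
// symmetric argument applies when every operand is non-positive.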
3217 if (AddRec->hasNoSignedWrap()) { 3218 bool AllNonNeg = true; 3219 bool AllNonPos = true; 3220 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 3221 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 3222 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 3223 } 3224 if (AllNonNeg) 3225 ConservativeResult = ConservativeResult.intersectWith( 3226 ConstantRange(APInt(BitWidth, 0), 3227 APInt::getSignedMinValue(BitWidth))); 3228 else if (AllNonPos) 3229 ConservativeResult = ConservativeResult.intersectWith( 3230 ConstantRange(APInt::getSignedMinValue(BitWidth), 3231 APInt(BitWidth, 1))); 3232 } 3233 3234 // TODO: non-affine addrec 3235 if (AddRec->isAffine()) { 3236 const Type *Ty = AddRec->getType(); 3237 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3238 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3239 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3240 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3241 3242 const SCEV *Start = AddRec->getStart(); 3243 const SCEV *Step = AddRec->getStepRecurrence(*this); 3244 3245 ConstantRange StartRange = getSignedRange(Start); 3246 ConstantRange StepRange = getSignedRange(Step); 3247 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount); 3248 ConstantRange EndRange = 3249 StartRange.add(MaxBECountRange.multiply(StepRange)); 3250 3251 // Check for overflow. This must be done with ConstantRange arithmetic 3252 // because we could be called from within the ScalarEvolution overflow 3253 // checking code. 3254 ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1); 3255 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1); 3256 ConstantRange ExtMaxBECountRange = 3257 MaxBECountRange.zextOrTrunc(BitWidth*2+1); 3258 ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1); 3259 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) != 3260 ExtEndRange) 3261 return ConservativeResult; 3262 3263 APInt Min = APIntOps::smin(StartRange.getSignedMin(), 3264 EndRange.getSignedMin()); 3265 APInt Max = APIntOps::smax(StartRange.getSignedMax(), 3266 EndRange.getSignedMax()); 3267 if (Min.isMinSignedValue() && Max.isMaxSignedValue()) 3268 return ConservativeResult; 3269 return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); 3270 } 3271 } 3272 3273 return ConservativeResult; 3274 } 3275 3276 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3277 // For a SCEVUnknown, ask ValueTracking. 3278 if (!U->getValue()->getType()->isIntegerTy() && !TD) 3279 return ConservativeResult; 3280 unsigned NS = ComputeNumSignBits(U->getValue(), TD); 3281 if (NS == 1) 3282 return ConservativeResult; 3283 return ConservativeResult.intersectWith( 3284 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 3285 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)); 3286 } 3287 3288 return ConservativeResult; 3289 } 3290 3291 /// createSCEV - We know that there is no SCEV for the specified value. 3292 /// Analyze the expression. 3293 /// 3294 const SCEV *ScalarEvolution::createSCEV(Value *V) { 3295 if (!isSCEVable(V->getType())) 3296 return getUnknown(V); 3297 3298 unsigned Opcode = Instruction::UserOp1; 3299 if (Instruction *I = dyn_cast<Instruction>(V)) { 3300 Opcode = I->getOpcode(); 3301 3302 // Don't attempt to analyze instructions in blocks that aren't 3303 // reachable. Such instructions don't matter, and they aren't required 3304 // to obey basic rules for definitions dominating uses which this 3305 // analysis depends on. 
3306 if (!DT->isReachableFromEntry(I->getParent())) 3307 return getUnknown(V); 3308 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 3309 Opcode = CE->getOpcode(); 3310 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 3311 return getConstant(CI); 3312 else if (isa<ConstantPointerNull>(V)) 3313 return getConstant(V->getType(), 0); 3314 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 3315 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); 3316 else 3317 return getUnknown(V); 3318 3319 Operator *U = cast<Operator>(V); 3320 switch (Opcode) { 3321 case Instruction::Add: { 3322 // The simple thing to do would be to just call getSCEV on both operands 3323 // and call getAddExpr with the result. However if we're looking at a 3324 // bunch of things all added together, this can be quite inefficient, 3325 // because it leads to N-1 getAddExpr calls for N ultimate operands. 3326 // Instead, gather up all the operands and make a single getAddExpr call. 3327 // LLVM IR canonical form means we need only traverse the left operands. 3328 SmallVector<const SCEV *, 4> AddOps; 3329 AddOps.push_back(getSCEV(U->getOperand(1))); 3330 for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) { 3331 unsigned Opcode = Op->getValueID() - Value::InstructionVal; 3332 if (Opcode != Instruction::Add && Opcode != Instruction::Sub) 3333 break; 3334 U = cast<Operator>(Op); 3335 const SCEV *Op1 = getSCEV(U->getOperand(1)); 3336 if (Opcode == Instruction::Sub) 3337 AddOps.push_back(getNegativeSCEV(Op1)); 3338 else 3339 AddOps.push_back(Op1); 3340 } 3341 AddOps.push_back(getSCEV(U->getOperand(0))); 3342 return getAddExpr(AddOps); 3343 } 3344 case Instruction::Mul: { 3345 // See the Add code above. 3346 SmallVector<const SCEV *, 4> MulOps; 3347 MulOps.push_back(getSCEV(U->getOperand(1))); 3348 for (Value *Op = U->getOperand(0); 3349 Op->getValueID() == Instruction::Mul + Value::InstructionVal; 3350 Op = U->getOperand(0)) { 3351 U = cast<Operator>(Op); 3352 MulOps.push_back(getSCEV(U->getOperand(1))); 3353 } 3354 MulOps.push_back(getSCEV(U->getOperand(0))); 3355 return getMulExpr(MulOps); 3356 } 3357 case Instruction::UDiv: 3358 return getUDivExpr(getSCEV(U->getOperand(0)), 3359 getSCEV(U->getOperand(1))); 3360 case Instruction::Sub: 3361 return getMinusSCEV(getSCEV(U->getOperand(0)), 3362 getSCEV(U->getOperand(1))); 3363 case Instruction::And: 3364 // For an expression like x&255 that merely masks off the high bits, 3365 // use zext(trunc(x)) as the SCEV expression. 3366 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3367 if (CI->isNullValue()) 3368 return getSCEV(U->getOperand(1)); 3369 if (CI->isAllOnesValue()) 3370 return getSCEV(U->getOperand(0)); 3371 const APInt &A = CI->getValue(); 3372 3373 // Instcombine's ShrinkDemandedConstant may strip bits out of 3374 // constants, obscuring what would otherwise be a low-bits mask. 3375 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 3376 // knew about to reconstruct a low-bits mask value. 
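// For example, 'x & 255' may have been shrunk to 'x & 127' when bit 7 of x is
// already known to be zero; the known-zero bits let us recognize that the
// 'and' still behaves as a low-bits mask, so it can be modeled as
// zext(trunc(x)) below.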
3377 unsigned LZ = A.countLeadingZeros(); 3378 unsigned BitWidth = A.getBitWidth(); 3379 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 3380 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 3381 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 3382 3383 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 3384 3385 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 3386 return 3387 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 3388 IntegerType::get(getContext(), BitWidth - LZ)), 3389 U->getType()); 3390 } 3391 break; 3392 3393 case Instruction::Or: 3394 // If the RHS of the Or is a constant, we may have something like: 3395 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 3396 // optimizations will transparently handle this case. 3397 // 3398 // In order for this transformation to be safe, the LHS must be of the 3399 // form X*(2^n) and the Or constant must be less than 2^n. 3400 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3401 const SCEV *LHS = getSCEV(U->getOperand(0)); 3402 const APInt &CIVal = CI->getValue(); 3403 if (GetMinTrailingZeros(LHS) >= 3404 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 3405 // Build a plain add SCEV. 3406 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 3407 // If the LHS of the add was an addrec and it has no-wrap flags, 3408 // transfer the no-wrap flags, since an or won't introduce a wrap. 3409 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 3410 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 3411 if (OldAR->hasNoUnsignedWrap()) 3412 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true); 3413 if (OldAR->hasNoSignedWrap()) 3414 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true); 3415 } 3416 return S; 3417 } 3418 } 3419 break; 3420 case Instruction::Xor: 3421 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3422 // If the RHS of the xor is a signbit, then this is just an add. 3423 // Instcombine turns add of signbit into xor as a strength reduction step. 3424 if (CI->getValue().isSignBit()) 3425 return getAddExpr(getSCEV(U->getOperand(0)), 3426 getSCEV(U->getOperand(1))); 3427 3428 // If the RHS of xor is -1, then this is a not operation. 3429 if (CI->isAllOnesValue()) 3430 return getNotSCEV(getSCEV(U->getOperand(0))); 3431 3432 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 3433 // This is a variant of the check for xor with -1, and it handles 3434 // the case where instcombine has trimmed non-demanded bits out 3435 // of an xor with -1. 3436 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) 3437 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) 3438 if (BO->getOpcode() == Instruction::And && 3439 LCI->getValue() == CI->getValue()) 3440 if (const SCEVZeroExtendExpr *Z = 3441 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { 3442 const Type *UTy = U->getType(); 3443 const SCEV *Z0 = Z->getOperand(); 3444 const Type *Z0Ty = Z0->getType(); 3445 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 3446 3447 // If C is a low-bits mask, the zero extend is serving to 3448 // mask off the high bits. Complement the operand and 3449 // re-apply the zext. 3450 if (APIntOps::isMask(Z0TySize, CI->getValue())) 3451 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 3452 3453 // If C is a single bit, it may be in the sign-bit position 3454 // before the zero-extend. 
In this case, represent the xor
3455 // using an add, which is equivalent, and re-apply the zext.
3456 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
3457 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3458 Trunc.isSignBit())
3459 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3460 UTy);
3461 }
3462 }
3463 break;
3464
3465 case Instruction::Shl:
3466 // Turn shift left of a constant amount into a multiply.
3467 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3468 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3469
3470 // If the shift count is not less than the bitwidth, the result of
3471 // the shift is undefined. Don't try to analyze it, because the
3472 // resolution chosen here may differ from the resolution chosen in
3473 // other parts of the compiler.
3474 if (SA->getValue().uge(BitWidth))
3475 break;
3476
3477 Constant *X = ConstantInt::get(getContext(),
3478 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3479 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3480 }
3481 break;
3482
3483 case Instruction::LShr:
3484 // Turn logical shift right of a constant into an unsigned divide.
3485 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3486 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3487
3488 // If the shift count is not less than the bitwidth, the result of
3489 // the shift is undefined. Don't try to analyze it, because the
3490 // resolution chosen here may differ from the resolution chosen in
3491 // other parts of the compiler.
3492 if (SA->getValue().uge(BitWidth))
3493 break;
3494
3495 Constant *X = ConstantInt::get(getContext(),
3496 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3497 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3498 }
3499 break;
3500
3501 case Instruction::AShr:
3502 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
3503 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3504 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3505 if (L->getOpcode() == Instruction::Shl &&
3506 L->getOperand(1) == U->getOperand(1)) {
3507 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3508
3509 // If the shift count is not less than the bitwidth, the result of
3510 // the shift is undefined. Don't try to analyze it, because the
3511 // resolution chosen here may differ from the resolution chosen in
3512 // other parts of the compiler.
3513 if (CI->getValue().uge(BitWidth))
3514 break;
3515
3516 uint64_t Amt = BitWidth - CI->getZExtValue();
3517 if (Amt == BitWidth)
3518 return getSCEV(L->getOperand(0)); // shift by zero --> noop
3519 return
3520 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3521 IntegerType::get(getContext(),
3522 Amt)),
3523 U->getType());
3524 }
3525 break;
3526
3527 case Instruction::Trunc:
3528 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3529
3530 case Instruction::ZExt:
3531 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3532
3533 case Instruction::SExt:
3534 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3535
3536 case Instruction::BitCast:
3537 // BitCasts are no-op casts so we just eliminate the cast.
3538 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 3539 return getSCEV(U->getOperand(0)); 3540 break; 3541 3542 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 3543 // lead to pointer expressions which cannot safely be expanded to GEPs, 3544 // because ScalarEvolution doesn't respect the GEP aliasing rules when 3545 // simplifying integer expressions. 3546 3547 case Instruction::GetElementPtr: 3548 return createNodeForGEP(cast<GEPOperator>(U)); 3549 3550 case Instruction::PHI: 3551 return createNodeForPHI(cast<PHINode>(U)); 3552 3553 case Instruction::Select: 3554 // This could be a smax or umax that was lowered earlier. 3555 // Try to recover it. 3556 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 3557 Value *LHS = ICI->getOperand(0); 3558 Value *RHS = ICI->getOperand(1); 3559 switch (ICI->getPredicate()) { 3560 case ICmpInst::ICMP_SLT: 3561 case ICmpInst::ICMP_SLE: 3562 std::swap(LHS, RHS); 3563 // fall through 3564 case ICmpInst::ICMP_SGT: 3565 case ICmpInst::ICMP_SGE: 3566 // a >s b ? a+x : b+x -> smax(a, b)+x 3567 // a >s b ? b+x : a+x -> smin(a, b)+x 3568 if (LHS->getType() == U->getType()) { 3569 const SCEV *LS = getSCEV(LHS); 3570 const SCEV *RS = getSCEV(RHS); 3571 const SCEV *LA = getSCEV(U->getOperand(1)); 3572 const SCEV *RA = getSCEV(U->getOperand(2)); 3573 const SCEV *LDiff = getMinusSCEV(LA, LS); 3574 const SCEV *RDiff = getMinusSCEV(RA, RS); 3575 if (LDiff == RDiff) 3576 return getAddExpr(getSMaxExpr(LS, RS), LDiff); 3577 LDiff = getMinusSCEV(LA, RS); 3578 RDiff = getMinusSCEV(RA, LS); 3579 if (LDiff == RDiff) 3580 return getAddExpr(getSMinExpr(LS, RS), LDiff); 3581 } 3582 break; 3583 case ICmpInst::ICMP_ULT: 3584 case ICmpInst::ICMP_ULE: 3585 std::swap(LHS, RHS); 3586 // fall through 3587 case ICmpInst::ICMP_UGT: 3588 case ICmpInst::ICMP_UGE: 3589 // a >u b ? a+x : b+x -> umax(a, b)+x 3590 // a >u b ? b+x : a+x -> umin(a, b)+x 3591 if (LHS->getType() == U->getType()) { 3592 const SCEV *LS = getSCEV(LHS); 3593 const SCEV *RS = getSCEV(RHS); 3594 const SCEV *LA = getSCEV(U->getOperand(1)); 3595 const SCEV *RA = getSCEV(U->getOperand(2)); 3596 const SCEV *LDiff = getMinusSCEV(LA, LS); 3597 const SCEV *RDiff = getMinusSCEV(RA, RS); 3598 if (LDiff == RDiff) 3599 return getAddExpr(getUMaxExpr(LS, RS), LDiff); 3600 LDiff = getMinusSCEV(LA, RS); 3601 RDiff = getMinusSCEV(RA, LS); 3602 if (LDiff == RDiff) 3603 return getAddExpr(getUMinExpr(LS, RS), LDiff); 3604 } 3605 break; 3606 case ICmpInst::ICMP_NE: 3607 // n != 0 ? n+x : 1+x -> umax(n, 1)+x 3608 if (LHS->getType() == U->getType() && 3609 isa<ConstantInt>(RHS) && 3610 cast<ConstantInt>(RHS)->isZero()) { 3611 const SCEV *One = getConstant(LHS->getType(), 1); 3612 const SCEV *LS = getSCEV(LHS); 3613 const SCEV *LA = getSCEV(U->getOperand(1)); 3614 const SCEV *RA = getSCEV(U->getOperand(2)); 3615 const SCEV *LDiff = getMinusSCEV(LA, LS); 3616 const SCEV *RDiff = getMinusSCEV(RA, One); 3617 if (LDiff == RDiff) 3618 return getAddExpr(getUMaxExpr(One, LS), LDiff); 3619 } 3620 break; 3621 case ICmpInst::ICMP_EQ: 3622 // n == 0 ? 
1+x : n+x -> umax(n, 1)+x 3623 if (LHS->getType() == U->getType() && 3624 isa<ConstantInt>(RHS) && 3625 cast<ConstantInt>(RHS)->isZero()) { 3626 const SCEV *One = getConstant(LHS->getType(), 1); 3627 const SCEV *LS = getSCEV(LHS); 3628 const SCEV *LA = getSCEV(U->getOperand(1)); 3629 const SCEV *RA = getSCEV(U->getOperand(2)); 3630 const SCEV *LDiff = getMinusSCEV(LA, One); 3631 const SCEV *RDiff = getMinusSCEV(RA, LS); 3632 if (LDiff == RDiff) 3633 return getAddExpr(getUMaxExpr(One, LS), LDiff); 3634 } 3635 break; 3636 default: 3637 break; 3638 } 3639 } 3640 3641 default: // We cannot analyze this expression. 3642 break; 3643 } 3644 3645 return getUnknown(V); 3646 } 3647 3648 3649 3650 //===----------------------------------------------------------------------===// 3651 // Iteration Count Computation Code 3652 // 3653 3654 /// getBackedgeTakenCount - If the specified loop has a predictable 3655 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 3656 /// object. The backedge-taken count is the number of times the loop header 3657 /// will be branched to from within the loop. This is one less than the 3658 /// trip count of the loop, since it doesn't count the first iteration, 3659 /// when the header is branched to from outside the loop. 3660 /// 3661 /// Note that it is not valid to call this method on a loop without a 3662 /// loop-invariant backedge-taken count (see 3663 /// hasLoopInvariantBackedgeTakenCount). 3664 /// 3665 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 3666 return getBackedgeTakenInfo(L).Exact; 3667 } 3668 3669 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except 3670 /// return the least SCEV value that is known never to be less than the 3671 /// actual backedge taken count. 3672 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 3673 return getBackedgeTakenInfo(L).Max; 3674 } 3675 3676 /// PushLoopPHIs - Push PHI nodes in the header of the given loop 3677 /// onto the given Worklist. 3678 static void 3679 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 3680 BasicBlock *Header = L->getHeader(); 3681 3682 // Push all Loop-header PHIs onto the Worklist stack. 3683 for (BasicBlock::iterator I = Header->begin(); 3684 PHINode *PN = dyn_cast<PHINode>(I); ++I) 3685 Worklist.push_back(PN); 3686 } 3687 3688 const ScalarEvolution::BackedgeTakenInfo & 3689 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 3690 // Initially insert a CouldNotCompute for this loop. If the insertion 3691 // succeeds, proceed to actually compute a backedge-taken count and 3692 // update the value. The temporary CouldNotCompute value tells SCEV 3693 // code elsewhere that it shouldn't attempt to request a new 3694 // backedge-taken count, which could result in infinite recursion. 3695 std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 3696 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); 3697 if (Pair.second) { 3698 BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L); 3699 if (BECount.Exact != getCouldNotCompute()) { 3700 assert(BECount.Exact->isLoopInvariant(L) && 3701 BECount.Max->isLoopInvariant(L) && 3702 "Computed backedge-taken count isn't loop invariant for loop!"); 3703 ++NumTripCountsComputed; 3704 3705 // Update the value in the map. 3706 Pair.first->second = BECount; 3707 } else { 3708 if (BECount.Max != getCouldNotCompute()) 3709 // Update the value in the map. 
3710 Pair.first->second = BECount;
3711 if (isa<PHINode>(L->getHeader()->begin()))
3712 // Only count loops that have phi nodes as not being computable.
3713 ++NumTripCountsNotComputed;
3714 }
3715
3716 // Now that we know more about the trip count for this loop, forget any
3717 // existing SCEV values for PHI nodes in this loop since they are only
3718 // conservative estimates made without the benefit of trip count
3719 // information. This is similar to the code in forgetLoop, except that
3720 // it handles SCEVUnknown PHI nodes specially.
3721 if (BECount.hasAnyInfo()) {
3722 SmallVector<Instruction *, 16> Worklist;
3723 PushLoopPHIs(L, Worklist);
3724
3725 SmallPtrSet<Instruction *, 8> Visited;
3726 while (!Worklist.empty()) {
3727 Instruction *I = Worklist.pop_back_val();
3728 if (!Visited.insert(I)) continue;
3729
3730 ValueExprMapType::iterator It =
3731 ValueExprMap.find(static_cast<Value *>(I));
3732 if (It != ValueExprMap.end()) {
3733 // SCEVUnknown for a PHI either means that it has an unrecognized
3734 // structure, or it's a PHI that's in the process of being computed
3735 // by createNodeForPHI. In the former case, additional loop trip
3736 // count information isn't going to change anything. In the latter
3737 // case, createNodeForPHI will perform the necessary updates on its
3738 // own when it gets to that point.
3739 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
3740 ValuesAtScopes.erase(It->second);
3741 ValueExprMap.erase(It);
3742 }
3743 if (PHINode *PN = dyn_cast<PHINode>(I))
3744 ConstantEvolutionLoopExitValue.erase(PN);
3745 }
3746
3747 PushDefUseChildren(I, Worklist);
3748 }
3749 }
3750 }
3751 return Pair.first->second;
3752 }
3753
3754 /// forgetLoop - This method should be called by the client when it has
3755 /// changed a loop in a way that may affect ScalarEvolution's ability to
3756 /// compute a trip count, or if the loop is deleted.
3757 void ScalarEvolution::forgetLoop(const Loop *L) {
3758 // Drop any stored trip count value.
3759 BackedgeTakenCounts.erase(L);
3760
3761 // Drop information about expressions based on loop-header PHIs.
3762 SmallVector<Instruction *, 16> Worklist;
3763 PushLoopPHIs(L, Worklist);
3764
3765 SmallPtrSet<Instruction *, 8> Visited;
3766 while (!Worklist.empty()) {
3767 Instruction *I = Worklist.pop_back_val();
3768 if (!Visited.insert(I)) continue;
3769
3770 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
3771 if (It != ValueExprMap.end()) {
3772 ValuesAtScopes.erase(It->second);
3773 ValueExprMap.erase(It);
3774 if (PHINode *PN = dyn_cast<PHINode>(I))
3775 ConstantEvolutionLoopExitValue.erase(PN);
3776 }
3777
3778 PushDefUseChildren(I, Worklist);
3779 }
3780 }
3781
3782 /// forgetValue - This method should be called by the client when it has
3783 /// changed a value in a way that may affect its value, or which may
3784 /// disconnect it from a def-use chain linking it to a loop.
3785 void ScalarEvolution::forgetValue(Value *V) {
3786 Instruction *I = dyn_cast<Instruction>(V);
3787 if (!I) return;
3788
3789 // Drop information about expressions based on this value.
3790 SmallVector<Instruction *, 16> Worklist; 3791 Worklist.push_back(I); 3792 3793 SmallPtrSet<Instruction *, 8> Visited; 3794 while (!Worklist.empty()) { 3795 I = Worklist.pop_back_val(); 3796 if (!Visited.insert(I)) continue; 3797 3798 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I)); 3799 if (It != ValueExprMap.end()) { 3800 ValuesAtScopes.erase(It->second); 3801 ValueExprMap.erase(It); 3802 if (PHINode *PN = dyn_cast<PHINode>(I)) 3803 ConstantEvolutionLoopExitValue.erase(PN); 3804 } 3805 3806 PushDefUseChildren(I, Worklist); 3807 } 3808 } 3809 3810 /// ComputeBackedgeTakenCount - Compute the number of times the backedge 3811 /// of the specified loop will execute. 3812 ScalarEvolution::BackedgeTakenInfo 3813 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { 3814 SmallVector<BasicBlock *, 8> ExitingBlocks; 3815 L->getExitingBlocks(ExitingBlocks); 3816 3817 // Examine all exits and pick the most conservative values. 3818 const SCEV *BECount = getCouldNotCompute(); 3819 const SCEV *MaxBECount = getCouldNotCompute(); 3820 bool CouldNotComputeBECount = false; 3821 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 3822 BackedgeTakenInfo NewBTI = 3823 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]); 3824 3825 if (NewBTI.Exact == getCouldNotCompute()) { 3826 // We couldn't compute an exact value for this exit, so 3827 // we won't be able to compute an exact value for the loop. 3828 CouldNotComputeBECount = true; 3829 BECount = getCouldNotCompute(); 3830 } else if (!CouldNotComputeBECount) { 3831 if (BECount == getCouldNotCompute()) 3832 BECount = NewBTI.Exact; 3833 else 3834 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact); 3835 } 3836 if (MaxBECount == getCouldNotCompute()) 3837 MaxBECount = NewBTI.Max; 3838 else if (NewBTI.Max != getCouldNotCompute()) 3839 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max); 3840 } 3841 3842 return BackedgeTakenInfo(BECount, MaxBECount); 3843 } 3844 3845 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge 3846 /// of the specified loop will execute if it exits via the specified block. 3847 ScalarEvolution::BackedgeTakenInfo 3848 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, 3849 BasicBlock *ExitingBlock) { 3850 3851 // Okay, we've chosen an exiting block. See what condition causes us to 3852 // exit at this block. 3853 // 3854 // FIXME: we should be able to handle switch instructions (with a single exit) 3855 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 3856 if (ExitBr == 0) return getCouldNotCompute(); 3857 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); 3858 3859 // At this point, we know we have a conditional branch that determines whether 3860 // the loop is exited. However, we don't know if the branch is executed each 3861 // time through the loop. If not, then the execution count of the branch will 3862 // not be equal to the trip count of the loop. 3863 // 3864 // Currently we check for this by checking to see if the Exit branch goes to 3865 // the loop header. If so, we know it will always execute the same number of 3866 // times as the loop. We also handle the case where the exit block *is* the 3867 // loop header. This is common for un-rotated loops. 3868 // 3869 // If both of those tests fail, walk up the unique predecessor chain to the 3870 // header, stopping if there is an edge that doesn't exit the loop. 
If the 3871 // header is reached, the execution count of the branch will be equal to the 3872 // trip count of the loop. 3873 // 3874 // More extensive analysis could be done to handle more cases here. 3875 // 3876 if (ExitBr->getSuccessor(0) != L->getHeader() && 3877 ExitBr->getSuccessor(1) != L->getHeader() && 3878 ExitBr->getParent() != L->getHeader()) { 3879 // The simple checks failed, try climbing the unique predecessor chain 3880 // up to the header. 3881 bool Ok = false; 3882 for (BasicBlock *BB = ExitBr->getParent(); BB; ) { 3883 BasicBlock *Pred = BB->getUniquePredecessor(); 3884 if (!Pred) 3885 return getCouldNotCompute(); 3886 TerminatorInst *PredTerm = Pred->getTerminator(); 3887 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { 3888 BasicBlock *PredSucc = PredTerm->getSuccessor(i); 3889 if (PredSucc == BB) 3890 continue; 3891 // If the predecessor has a successor that isn't BB and isn't 3892 // outside the loop, assume the worst. 3893 if (L->contains(PredSucc)) 3894 return getCouldNotCompute(); 3895 } 3896 if (Pred == L->getHeader()) { 3897 Ok = true; 3898 break; 3899 } 3900 BB = Pred; 3901 } 3902 if (!Ok) 3903 return getCouldNotCompute(); 3904 } 3905 3906 // Proceed to the next level to examine the exit condition expression. 3907 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), 3908 ExitBr->getSuccessor(0), 3909 ExitBr->getSuccessor(1)); 3910 } 3911 3912 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the 3913 /// backedge of the specified loop will execute if its exit condition 3914 /// were a conditional branch of ExitCond, TBB, and FBB. 3915 ScalarEvolution::BackedgeTakenInfo 3916 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, 3917 Value *ExitCond, 3918 BasicBlock *TBB, 3919 BasicBlock *FBB) { 3920 // Check if the controlling expression for this loop is an And or Or. 3921 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 3922 if (BO->getOpcode() == Instruction::And) { 3923 // Recurse on the operands of the and. 3924 BackedgeTakenInfo BTI0 = 3925 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3926 BackedgeTakenInfo BTI1 = 3927 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3928 const SCEV *BECount = getCouldNotCompute(); 3929 const SCEV *MaxBECount = getCouldNotCompute(); 3930 if (L->contains(TBB)) { 3931 // Both conditions must be true for the loop to continue executing. 3932 // Choose the less conservative count. 3933 if (BTI0.Exact == getCouldNotCompute() || 3934 BTI1.Exact == getCouldNotCompute()) 3935 BECount = getCouldNotCompute(); 3936 else 3937 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3938 if (BTI0.Max == getCouldNotCompute()) 3939 MaxBECount = BTI1.Max; 3940 else if (BTI1.Max == getCouldNotCompute()) 3941 MaxBECount = BTI0.Max; 3942 else 3943 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3944 } else { 3945 // Both conditions must be true at the same time for the loop to exit. 3946 // For now, be conservative. 3947 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 3948 if (BTI0.Max == BTI1.Max) 3949 MaxBECount = BTI0.Max; 3950 if (BTI0.Exact == BTI1.Exact) 3951 BECount = BTI0.Exact; 3952 } 3953 3954 return BackedgeTakenInfo(BECount, MaxBECount); 3955 } 3956 if (BO->getOpcode() == Instruction::Or) { 3957 // Recurse on the operands of the or. 
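// (This mirrors the And case above with the branch roles swapped: the loop
// keeps running only while both operands are false, so when the false edge
// stays in the loop the backedge-taken count is the umin of the two operand
// counts.)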
3958 BackedgeTakenInfo BTI0 = 3959 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3960 BackedgeTakenInfo BTI1 = 3961 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3962 const SCEV *BECount = getCouldNotCompute(); 3963 const SCEV *MaxBECount = getCouldNotCompute(); 3964 if (L->contains(FBB)) { 3965 // Both conditions must be false for the loop to continue executing. 3966 // Choose the less conservative count. 3967 if (BTI0.Exact == getCouldNotCompute() || 3968 BTI1.Exact == getCouldNotCompute()) 3969 BECount = getCouldNotCompute(); 3970 else 3971 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3972 if (BTI0.Max == getCouldNotCompute()) 3973 MaxBECount = BTI1.Max; 3974 else if (BTI1.Max == getCouldNotCompute()) 3975 MaxBECount = BTI0.Max; 3976 else 3977 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3978 } else { 3979 // Both conditions must be false at the same time for the loop to exit. 3980 // For now, be conservative. 3981 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 3982 if (BTI0.Max == BTI1.Max) 3983 MaxBECount = BTI0.Max; 3984 if (BTI0.Exact == BTI1.Exact) 3985 BECount = BTI0.Exact; 3986 } 3987 3988 return BackedgeTakenInfo(BECount, MaxBECount); 3989 } 3990 } 3991 3992 // With an icmp, it may be feasible to compute an exact backedge-taken count. 3993 // Proceed to the next level to examine the icmp. 3994 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) 3995 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); 3996 3997 // Check for a constant condition. These are normally stripped out by 3998 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to 3999 // preserve the CFG and is temporarily leaving constant conditions 4000 // in place. 4001 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { 4002 if (L->contains(FBB) == !CI->getZExtValue()) 4003 // The backedge is always taken. 4004 return getCouldNotCompute(); 4005 else 4006 // The backedge is never taken. 4007 return getConstant(CI->getType(), 0); 4008 } 4009 4010 // If it's not an integer or pointer comparison then compute it the hard way. 4011 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 4012 } 4013 4014 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the 4015 /// backedge of the specified loop will execute if its exit condition 4016 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. 4017 ScalarEvolution::BackedgeTakenInfo 4018 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, 4019 ICmpInst *ExitCond, 4020 BasicBlock *TBB, 4021 BasicBlock *FBB) { 4022 4023 // If the condition was exit on true, convert the condition to exit on false 4024 ICmpInst::Predicate Cond; 4025 if (!L->contains(FBB)) 4026 Cond = ExitCond->getPredicate(); 4027 else 4028 Cond = ExitCond->getInversePredicate(); 4029 4030 // Handle common loops like: for (X = "string"; *X; ++X) 4031 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 4032 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 4033 BackedgeTakenInfo ItCnt = 4034 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); 4035 if (ItCnt.hasAnyInfo()) 4036 return ItCnt; 4037 } 4038 4039 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 4040 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 4041 4042 // Try to evaluate any dependencies out of the loop. 
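// For example, if one side of the comparison is the exit value of an inner
// loop with a computable trip count, getSCEVAtScope can often replace it here
// with a closed-form expression that is invariant in L.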
4043 LHS = getSCEVAtScope(LHS, L); 4044 RHS = getSCEVAtScope(RHS, L); 4045 4046 // At this point, we would like to compute how many iterations of the 4047 // loop the predicate will return true for these inputs. 4048 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) { 4049 // If there is a loop-invariant, force it into the RHS. 4050 std::swap(LHS, RHS); 4051 Cond = ICmpInst::getSwappedPredicate(Cond); 4052 } 4053 4054 // Simplify the operands before analyzing them. 4055 (void)SimplifyICmpOperands(Cond, LHS, RHS); 4056 4057 // If we have a comparison of a chrec against a constant, try to use value 4058 // ranges to answer this query. 4059 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 4060 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 4061 if (AddRec->getLoop() == L) { 4062 // Form the constant range. 4063 ConstantRange CompRange( 4064 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 4065 4066 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 4067 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 4068 } 4069 4070 switch (Cond) { 4071 case ICmpInst::ICMP_NE: { // while (X != Y) 4072 // Convert to: while (X-Y != 0) 4073 BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L); 4074 if (BTI.hasAnyInfo()) return BTI; 4075 break; 4076 } 4077 case ICmpInst::ICMP_EQ: { // while (X == Y) 4078 // Convert to: while (X-Y == 0) 4079 BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 4080 if (BTI.hasAnyInfo()) return BTI; 4081 break; 4082 } 4083 case ICmpInst::ICMP_SLT: { 4084 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); 4085 if (BTI.hasAnyInfo()) return BTI; 4086 break; 4087 } 4088 case ICmpInst::ICMP_SGT: { 4089 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 4090 getNotSCEV(RHS), L, true); 4091 if (BTI.hasAnyInfo()) return BTI; 4092 break; 4093 } 4094 case ICmpInst::ICMP_ULT: { 4095 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); 4096 if (BTI.hasAnyInfo()) return BTI; 4097 break; 4098 } 4099 case ICmpInst::ICMP_UGT: { 4100 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 4101 getNotSCEV(RHS), L, false); 4102 if (BTI.hasAnyInfo()) return BTI; 4103 break; 4104 } 4105 default: 4106 #if 0 4107 dbgs() << "ComputeBackedgeTakenCount "; 4108 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 4109 dbgs() << "[unsigned] "; 4110 dbgs() << *LHS << " " 4111 << Instruction::getOpcodeName(Instruction::ICmp) 4112 << " " << *RHS << "\n"; 4113 #endif 4114 break; 4115 } 4116 return 4117 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 4118 } 4119 4120 static ConstantInt * 4121 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 4122 ScalarEvolution &SE) { 4123 const SCEV *InVal = SE.getConstant(C); 4124 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 4125 assert(isa<SCEVConstant>(Val) && 4126 "Evaluation of SCEV at constant didn't fold correctly?"); 4127 return cast<SCEVConstant>(Val)->getValue(); 4128 } 4129 4130 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 4131 /// and a GEP expression (missing the pointer index) indexing into it, return 4132 /// the addressed element of the initializer or null if the index expression is 4133 /// invalid. 
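/// For example, with an initializer of [4 x i32] [i32 1, i32 2, i32 3, i32 4]
/// and Indices = {2}, the addressed element is the constant i32 3.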
4134 static Constant * 4135 GetAddressedElementFromGlobal(GlobalVariable *GV, 4136 const std::vector<ConstantInt*> &Indices) { 4137 Constant *Init = GV->getInitializer(); 4138 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 4139 uint64_t Idx = Indices[i]->getZExtValue(); 4140 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 4141 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 4142 Init = cast<Constant>(CS->getOperand(Idx)); 4143 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 4144 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 4145 Init = cast<Constant>(CA->getOperand(Idx)); 4146 } else if (isa<ConstantAggregateZero>(Init)) { 4147 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 4148 assert(Idx < STy->getNumElements() && "Bad struct index!"); 4149 Init = Constant::getNullValue(STy->getElementType(Idx)); 4150 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 4151 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 4152 Init = Constant::getNullValue(ATy->getElementType()); 4153 } else { 4154 llvm_unreachable("Unknown constant aggregate type!"); 4155 } 4156 return 0; 4157 } else { 4158 return 0; // Unknown initializer type 4159 } 4160 } 4161 return Init; 4162 } 4163 4164 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 4165 /// 'icmp op load X, cst', try to see if we can compute the backedge 4166 /// execution count. 4167 ScalarEvolution::BackedgeTakenInfo 4168 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( 4169 LoadInst *LI, 4170 Constant *RHS, 4171 const Loop *L, 4172 ICmpInst::Predicate predicate) { 4173 if (LI->isVolatile()) return getCouldNotCompute(); 4174 4175 // Check to see if the loaded pointer is a getelementptr of a global. 4176 // TODO: Use SCEV instead of manually grubbing with GEPs. 4177 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 4178 if (!GEP) return getCouldNotCompute(); 4179 4180 // Make sure that it is really a constant global we are gepping, with an 4181 // initializer, and make sure the first IDX is really 0. 4182 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 4183 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 4184 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 4185 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 4186 return getCouldNotCompute(); 4187 4188 // Okay, we allow one non-constant index into the GEP instruction. 4189 Value *VarIdx = 0; 4190 std::vector<ConstantInt*> Indexes; 4191 unsigned VarIdxNum = 0; 4192 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 4193 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 4194 Indexes.push_back(CI); 4195 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 4196 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 4197 VarIdx = GEP->getOperand(i); 4198 VarIdxNum = i-2; 4199 Indexes.push_back(0); 4200 } 4201 4202 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 4203 // Check to see if X is a loop variant variable value now. 4204 const SCEV *Idx = getSCEV(VarIdx); 4205 Idx = getSCEVAtScope(Idx, L); 4206 4207 // We can only recognize very limited forms of loop index expressions, in 4208 // particular, only affine AddRec's like {C1,+,C2}. 
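// For example, an index evolving as {0,+,1} takes the constant values
// 0, 1, 2, ... on successive iterations, so the loop below can plug in each
// candidate iteration number and constant-fold the load and compare.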
4209 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 4210 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || 4211 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 4212 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 4213 return getCouldNotCompute(); 4214 4215 unsigned MaxSteps = MaxBruteForceIterations; 4216 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 4217 ConstantInt *ItCst = ConstantInt::get( 4218 cast<IntegerType>(IdxExpr->getType()), IterationNum); 4219 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 4220 4221 // Form the GEP offset. 4222 Indexes[VarIdxNum] = Val; 4223 4224 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 4225 if (Result == 0) break; // Cannot compute! 4226 4227 // Evaluate the condition for this iteration. 4228 Result = ConstantExpr::getICmp(predicate, Result, RHS); 4229 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 4230 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 4231 #if 0 4232 dbgs() << "\n***\n*** Computed loop count " << *ItCst 4233 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 4234 << "***\n"; 4235 #endif 4236 ++NumArrayLenItCounts; 4237 return getConstant(ItCst); // Found terminating iteration! 4238 } 4239 } 4240 return getCouldNotCompute(); 4241 } 4242 4243 4244 /// CanConstantFold - Return true if we can constant fold an instruction of the 4245 /// specified type, assuming that all operands were constants. 4246 static bool CanConstantFold(const Instruction *I) { 4247 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 4248 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 4249 return true; 4250 4251 if (const CallInst *CI = dyn_cast<CallInst>(I)) 4252 if (const Function *F = CI->getCalledFunction()) 4253 return canConstantFoldCallTo(F); 4254 return false; 4255 } 4256 4257 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 4258 /// in the loop that V is derived from. We allow arbitrary operations along the 4259 /// way, but the operands of an operation must either be constants or a value 4260 /// derived from a constant PHI. If this expression does not fit with these 4261 /// constraints, return null. 4262 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 4263 // If this is not an instruction, or if this is an instruction outside of the 4264 // loop, it can't be derived from a loop PHI. 4265 Instruction *I = dyn_cast<Instruction>(V); 4266 if (I == 0 || !L->contains(I)) return 0; 4267 4268 if (PHINode *PN = dyn_cast<PHINode>(I)) { 4269 if (L->getHeader() == I->getParent()) 4270 return PN; 4271 else 4272 // We don't currently keep track of the control flow needed to evaluate 4273 // PHIs, so we cannot handle PHIs inside of loops. 4274 return 0; 4275 } 4276 4277 // If we won't be able to constant fold this expression even if the operands 4278 // are constants, return early. 4279 if (!CanConstantFold(I)) return 0; 4280 4281 // Otherwise, we can evaluate this instruction if all of its operands are 4282 // constant or derived from a PHI node themselves. 4283 PHINode *PHI = 0; 4284 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) 4285 if (!isa<Constant>(I->getOperand(Op))) { 4286 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); 4287 if (P == 0) return 0; // Not evolving from PHI 4288 if (PHI == 0) 4289 PHI = P; 4290 else if (PHI != P) 4291 return 0; // Evolving from multiple different PHIs. 
4292 }
4293
4294 // This is an expression evolving from a constant PHI!
4295 return PHI;
4296 }
4297
4298 /// EvaluateExpression - Given an expression that passes the
4299 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4300 /// in the loop has the value PHIVal. If we can't fold this expression for some
4301 /// reason, return null.
4302 static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
4303 const TargetData *TD) {
4304 if (isa<PHINode>(V)) return PHIVal;
4305 if (Constant *C = dyn_cast<Constant>(V)) return C;
4306 Instruction *I = cast<Instruction>(V);
4307
4308 std::vector<Constant*> Operands(I->getNumOperands());
4309
4310 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4311 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
4312 if (Operands[i] == 0) return 0;
4313 }
4314
4315 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4316 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4317 Operands[1], TD);
4318 return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4319 &Operands[0], Operands.size(), TD);
4320 }
4321
4322 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4323 /// in the header of its containing loop, that the loop executes a
4324 /// constant number of times, and that the PHI node is just a recurrence
4325 /// involving constants, fold it.
4326 Constant *
4327 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4328 const APInt &BEs,
4329 const Loop *L) {
4330 std::map<PHINode*, Constant*>::const_iterator I =
4331 ConstantEvolutionLoopExitValue.find(PN);
4332 if (I != ConstantEvolutionLoopExitValue.end())
4333 return I->second;
4334
4335 if (BEs.ugt(MaxBruteForceIterations))
4336 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
4337
4338 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4339
4340 // Since the loop is canonicalized, the PHI node must have two entries. One
4341 // entry must be a constant (coming in from outside of the loop), and the
4342 // second must be derived from the same PHI.
4343 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4344 Constant *StartCST =
4345 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4346 if (StartCST == 0)
4347 return RetVal = 0; // Must be a constant.
4348
4349 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4350 if (getConstantEvolvingPHI(BEValue, L) != PN &&
4351 !isa<Constant>(BEValue))
4352 return RetVal = 0; // Not derived from same PHI.
4353
4354 // Execute the loop symbolically to determine the exit value.
4355 if (BEs.getActiveBits() >= 32)
4356 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4357
4358 unsigned NumIterations = BEs.getZExtValue(); // must be in range
4359 unsigned IterationNum = 0;
4360 for (Constant *PHIVal = StartCST; ; ++IterationNum) {
4361 if (IterationNum == NumIterations)
4362 return RetVal = PHIVal; // Got exit value!
4363
4364 // Compute the value of the PHI node for the next iteration.
4365 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
4366 if (NextPHI == PHIVal)
4367 return RetVal = NextPHI; // Stopped evolving!
4368 if (NextPHI == 0)
4369 return 0; // Couldn't evaluate!
4370 PHIVal = NextPHI; 4371 } 4372 } 4373 4374 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a 4375 /// constant number of times (the condition evolves only from constants), 4376 /// try to evaluate a few iterations of the loop until we get the exit 4377 /// condition gets a value of ExitWhen (true or false). If we cannot 4378 /// evaluate the trip count of the loop, return getCouldNotCompute(). 4379 const SCEV * 4380 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, 4381 Value *Cond, 4382 bool ExitWhen) { 4383 PHINode *PN = getConstantEvolvingPHI(Cond, L); 4384 if (PN == 0) return getCouldNotCompute(); 4385 4386 // If the loop is canonicalized, the PHI will have exactly two entries. 4387 // That's the only form we support here. 4388 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute(); 4389 4390 // One entry must be a constant (coming in from outside of the loop), and the 4391 // second must be derived from the same PHI. 4392 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4393 Constant *StartCST = 4394 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 4395 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant. 4396 4397 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4398 if (getConstantEvolvingPHI(BEValue, L) != PN && 4399 !isa<Constant>(BEValue)) 4400 return getCouldNotCompute(); // Not derived from same PHI. 4401 4402 // Okay, we find a PHI node that defines the trip count of this loop. Execute 4403 // the loop symbolically to determine when the condition gets a value of 4404 // "ExitWhen". 4405 unsigned IterationNum = 0; 4406 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 4407 for (Constant *PHIVal = StartCST; 4408 IterationNum != MaxIterations; ++IterationNum) { 4409 ConstantInt *CondVal = 4410 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD)); 4411 4412 // Couldn't symbolically evaluate. 4413 if (!CondVal) return getCouldNotCompute(); 4414 4415 if (CondVal->getValue() == uint64_t(ExitWhen)) { 4416 ++NumBruteForceTripCountsComputed; 4417 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 4418 } 4419 4420 // Compute the value of the PHI node for the next iteration. 4421 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); 4422 if (NextPHI == 0 || NextPHI == PHIVal) 4423 return getCouldNotCompute();// Couldn't evaluate or not making progress... 4424 PHIVal = NextPHI; 4425 } 4426 4427 // Too many iterations were needed to evaluate. 4428 return getCouldNotCompute(); 4429 } 4430 4431 /// getSCEVAtScope - Return a SCEV expression for the specified value 4432 /// at the specified scope in the program. The L value specifies a loop 4433 /// nest to evaluate the expression at, where null is the top-level or a 4434 /// specified loop is immediately inside of the loop. 4435 /// 4436 /// This method can be used to compute the exit value for a variable defined 4437 /// in a loop by querying what the value will hold in the parent loop. 4438 /// 4439 /// In the case that a relevant loop exit value cannot be computed, the 4440 /// original value V is returned. 4441 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 4442 // Check to see if we've folded this expression at this loop before. 
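  // ValuesAtScopes caches one result per (SCEV, Loop) pair. A null entry
  // appears to mark a query that is already in progress, in which case the
  // lookup below conservatively returns V itself.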
4443 std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V]; 4444 std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair = 4445 Values.insert(std::make_pair(L, static_cast<const SCEV *>(0))); 4446 if (!Pair.second) 4447 return Pair.first->second ? Pair.first->second : V; 4448 4449 // Otherwise compute it. 4450 const SCEV *C = computeSCEVAtScope(V, L); 4451 ValuesAtScopes[V][L] = C; 4452 return C; 4453 } 4454 4455 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 4456 if (isa<SCEVConstant>(V)) return V; 4457 4458 // If this instruction is evolved from a constant-evolving PHI, compute the 4459 // exit value from the loop without using SCEVs. 4460 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 4461 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 4462 const Loop *LI = (*this->LI)[I->getParent()]; 4463 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 4464 if (PHINode *PN = dyn_cast<PHINode>(I)) 4465 if (PN->getParent() == LI->getHeader()) { 4466 // Okay, there is no closed form solution for the PHI node. Check 4467 // to see if the loop that contains it has a known backedge-taken 4468 // count. If so, we may be able to force computation of the exit 4469 // value. 4470 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 4471 if (const SCEVConstant *BTCC = 4472 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 4473 // Okay, we know how many times the containing loop executes. If 4474 // this is a constant evolving PHI node, get the final value at 4475 // the specified iteration number. 4476 Constant *RV = getConstantEvolutionLoopExitValue(PN, 4477 BTCC->getValue()->getValue(), 4478 LI); 4479 if (RV) return getSCEV(RV); 4480 } 4481 } 4482 4483 // Okay, this is an expression that we cannot symbolically evaluate 4484 // into a SCEV. Check to see if it's possible to symbolically evaluate 4485 // the arguments into constants, and if so, try to constant propagate the 4486 // result. This is particularly useful for computing loop exit values. 4487 if (CanConstantFold(I)) { 4488 SmallVector<Constant *, 4> Operands; 4489 bool MadeImprovement = false; 4490 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 4491 Value *Op = I->getOperand(i); 4492 if (Constant *C = dyn_cast<Constant>(Op)) { 4493 Operands.push_back(C); 4494 continue; 4495 } 4496 4497 // If any of the operands is non-constant and if they are 4498 // non-integer and non-pointer, don't even try to analyze them 4499 // with scev techniques. 4500 if (!isSCEVable(Op->getType())) 4501 return V; 4502 4503 const SCEV *OrigV = getSCEV(Op); 4504 const SCEV *OpV = getSCEVAtScope(OrigV, L); 4505 MadeImprovement |= OrigV != OpV; 4506 4507 Constant *C = 0; 4508 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) 4509 C = SC->getValue(); 4510 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) 4511 C = dyn_cast<Constant>(SU->getValue()); 4512 if (!C) return V; 4513 if (C->getType() != Op->getType()) 4514 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4515 Op->getType(), 4516 false), 4517 C, Op->getType()); 4518 Operands.push_back(C); 4519 } 4520 4521 // Check to see if getSCEVAtScope actually made an improvement. 
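        // If nothing improved, folding would presumably just reproduce what
        // getSCEV already computed for I, so fall through and return V below.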
4522 if (MadeImprovement) { 4523 Constant *C = 0; 4524 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 4525 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 4526 Operands[0], Operands[1], TD); 4527 else 4528 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 4529 &Operands[0], Operands.size(), TD); 4530 if (!C) return V; 4531 return getSCEV(C); 4532 } 4533 } 4534 } 4535 4536 // This is some other type of SCEVUnknown, just return it. 4537 return V; 4538 } 4539 4540 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 4541 // Avoid performing the look-up in the common case where the specified 4542 // expression has no loop-variant portions. 4543 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 4544 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4545 if (OpAtScope != Comm->getOperand(i)) { 4546 // Okay, at least one of these operands is loop variant but might be 4547 // foldable. Build a new instance of the folded commutative expression. 4548 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 4549 Comm->op_begin()+i); 4550 NewOps.push_back(OpAtScope); 4551 4552 for (++i; i != e; ++i) { 4553 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4554 NewOps.push_back(OpAtScope); 4555 } 4556 if (isa<SCEVAddExpr>(Comm)) 4557 return getAddExpr(NewOps); 4558 if (isa<SCEVMulExpr>(Comm)) 4559 return getMulExpr(NewOps); 4560 if (isa<SCEVSMaxExpr>(Comm)) 4561 return getSMaxExpr(NewOps); 4562 if (isa<SCEVUMaxExpr>(Comm)) 4563 return getUMaxExpr(NewOps); 4564 llvm_unreachable("Unknown commutative SCEV type!"); 4565 } 4566 } 4567 // If we got here, all operands are loop invariant. 4568 return Comm; 4569 } 4570 4571 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 4572 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 4573 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 4574 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 4575 return Div; // must be loop invariant 4576 return getUDivExpr(LHS, RHS); 4577 } 4578 4579 // If this is a loop recurrence for a loop that does not contain L, then we 4580 // are dealing with the final value computed by the loop. 4581 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4582 // First, attempt to evaluate each operand. 4583 // Avoid performing the look-up in the common case where the specified 4584 // expression has no loop-variant portions. 4585 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 4586 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L); 4587 if (OpAtScope == AddRec->getOperand(i)) 4588 continue; 4589 4590 // Okay, at least one of these operands is loop variant but might be 4591 // foldable. Build a new instance of the folded commutative expression. 4592 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(), 4593 AddRec->op_begin()+i); 4594 NewOps.push_back(OpAtScope); 4595 for (++i; i != e; ++i) 4596 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L)); 4597 4598 AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop())); 4599 break; 4600 } 4601 4602 // If the scope is outside the addrec's loop, evaluate it by using the 4603 // loop exit value of the addrec. 4604 if (!AddRec->getLoop()->contains(L)) { 4605 // To evaluate this recurrence, we need to know how many times the AddRec 4606 // loop iterates. Compute this now. 4607 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 4608 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 4609 4610 // Then, evaluate the AddRec. 
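      // For example, for {0,+,2} in a loop with a backedge-taken count of 9,
      // this yields 0 + 2*9 = 18, the value of the recurrence on the loop's
      // final iteration.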
4611 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 4612 } 4613 4614 return AddRec; 4615 } 4616 4617 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 4618 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4619 if (Op == Cast->getOperand()) 4620 return Cast; // must be loop invariant 4621 return getZeroExtendExpr(Op, Cast->getType()); 4622 } 4623 4624 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 4625 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4626 if (Op == Cast->getOperand()) 4627 return Cast; // must be loop invariant 4628 return getSignExtendExpr(Op, Cast->getType()); 4629 } 4630 4631 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 4632 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4633 if (Op == Cast->getOperand()) 4634 return Cast; // must be loop invariant 4635 return getTruncateExpr(Op, Cast->getType()); 4636 } 4637 4638 llvm_unreachable("Unknown SCEV type!"); 4639 return 0; 4640 } 4641 4642 /// getSCEVAtScope - This is a convenience function which does 4643 /// getSCEVAtScope(getSCEV(V), L). 4644 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 4645 return getSCEVAtScope(getSCEV(V), L); 4646 } 4647 4648 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 4649 /// following equation: 4650 /// 4651 /// A * X = B (mod N) 4652 /// 4653 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 4654 /// A and B isn't important. 4655 /// 4656 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 4657 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 4658 ScalarEvolution &SE) { 4659 uint32_t BW = A.getBitWidth(); 4660 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 4661 assert(A != 0 && "A must be non-zero."); 4662 4663 // 1. D = gcd(A, N) 4664 // 4665 // The gcd of A and N may have only one prime factor: 2. The number of 4666 // trailing zeros in A is its multiplicity 4667 uint32_t Mult2 = A.countTrailingZeros(); 4668 // D = 2^Mult2 4669 4670 // 2. Check if B is divisible by D. 4671 // 4672 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 4673 // is not less than multiplicity of this prime factor for D. 4674 if (B.countTrailingZeros() < Mult2) 4675 return SE.getCouldNotCompute(); 4676 4677 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 4678 // modulo (N / D). 4679 // 4680 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 4681 // bit width during computations. 4682 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 4683 APInt Mod(BW + 1, 0); 4684 Mod.set(BW - Mult2); // Mod = N / D 4685 APInt I = AD.multiplicativeInverse(Mod); 4686 4687 // 4. Compute the minimum unsigned root of the equation: 4688 // I * (B / D) mod (N / D) 4689 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 4690 4691 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 4692 // bits. 4693 return SE.getConstant(Result.trunc(BW)); 4694 } 4695 4696 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 4697 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 4698 /// might be the same) or two SCEVCouldNotCompute objects. 
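/// A chrec {L,+,M,+,N} evaluated at iteration X is L + M*X + N*X*(X-1)/2,
/// i.e. the polynomial (N/2)*X^2 + (M - N/2)*X + L; for example, {0,+,1,+,2}
/// produces the values 0, 1, 4, 9, ... (X^2 at iteration X).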
4699 /// 4700 static std::pair<const SCEV *,const SCEV *> 4701 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 4702 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 4703 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 4704 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 4705 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 4706 4707 // We currently can only solve this if the coefficients are constants. 4708 if (!LC || !MC || !NC) { 4709 const SCEV *CNC = SE.getCouldNotCompute(); 4710 return std::make_pair(CNC, CNC); 4711 } 4712 4713 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 4714 const APInt &L = LC->getValue()->getValue(); 4715 const APInt &M = MC->getValue()->getValue(); 4716 const APInt &N = NC->getValue()->getValue(); 4717 APInt Two(BitWidth, 2); 4718 APInt Four(BitWidth, 4); 4719 4720 { 4721 using namespace APIntOps; 4722 const APInt& C = L; 4723 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 4724 // The B coefficient is M-N/2 4725 APInt B(M); 4726 B -= sdiv(N,Two); 4727 4728 // The A coefficient is N/2 4729 APInt A(N.sdiv(Two)); 4730 4731 // Compute the B^2-4ac term. 4732 APInt SqrtTerm(B); 4733 SqrtTerm *= B; 4734 SqrtTerm -= Four * (A * C); 4735 4736 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 4737 // integer value or else APInt::sqrt() will assert. 4738 APInt SqrtVal(SqrtTerm.sqrt()); 4739 4740 // Compute the two solutions for the quadratic formula. 4741 // The divisions must be performed as signed divisions. 4742 APInt NegB(-B); 4743 APInt TwoA( A << 1 ); 4744 if (TwoA.isMinValue()) { 4745 const SCEV *CNC = SE.getCouldNotCompute(); 4746 return std::make_pair(CNC, CNC); 4747 } 4748 4749 LLVMContext &Context = SE.getContext(); 4750 4751 ConstantInt *Solution1 = 4752 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); 4753 ConstantInt *Solution2 = 4754 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); 4755 4756 return std::make_pair(SE.getConstant(Solution1), 4757 SE.getConstant(Solution2)); 4758 } // end APIntOps namespace 4759 } 4760 4761 /// HowFarToZero - Return the number of times a backedge comparing the specified 4762 /// value to zero will execute. If not computable, return CouldNotCompute. 4763 ScalarEvolution::BackedgeTakenInfo 4764 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 4765 // If the value is a constant 4766 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4767 // If the value is already zero, the branch will execute zero times. 4768 if (C->getValue()->isZero()) return C; 4769 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4770 } 4771 4772 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 4773 if (!AddRec || AddRec->getLoop() != L) 4774 return getCouldNotCompute(); 4775 4776 if (AddRec->isAffine()) { 4777 // If this is an affine expression, the execution count of this branch is 4778 // the minimum unsigned root of the following equation: 4779 // 4780 // Start + Step*N = 0 (mod 2^BW) 4781 // 4782 // equivalent to: 4783 // 4784 // Step*N = -Start (mod 2^BW) 4785 // 4786 // where BW is the common bit width of Start and Step. 4787 4788 // Get the initial value for the loop. 
4789 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), 4790 L->getParentLoop()); 4791 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), 4792 L->getParentLoop()); 4793 4794 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { 4795 // For now we handle only constant steps. 4796 4797 // First, handle unitary steps. 4798 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 4799 return getNegativeSCEV(Start); // N = -Start (as unsigned) 4800 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 4801 return Start; // N = Start (as unsigned) 4802 4803 // Then, try to solve the above equation provided that Start is constant. 4804 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 4805 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 4806 -StartC->getValue()->getValue(), 4807 *this); 4808 } 4809 } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) { 4810 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 4811 // the quadratic equation to solve it. 4812 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec, 4813 *this); 4814 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4815 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4816 if (R1) { 4817 #if 0 4818 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 4819 << " sol#2: " << *R2 << "\n"; 4820 #endif 4821 // Pick the smallest positive root value. 4822 if (ConstantInt *CB = 4823 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 4824 R1->getValue(), R2->getValue()))) { 4825 if (CB->getZExtValue() == false) 4826 std::swap(R1, R2); // R1 is the minimum root now. 4827 4828 // We can only use this value if the chrec ends up with an exact zero 4829 // value at this index. When solving for "X*X != 5", for example, we 4830 // should not accept a root of 2. 4831 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 4832 if (Val->isZero()) 4833 return R1; // We found a quadratic root! 4834 } 4835 } 4836 } 4837 4838 return getCouldNotCompute(); 4839 } 4840 4841 /// HowFarToNonZero - Return the number of times a backedge checking the 4842 /// specified value for nonzero will execute. If not computable, return 4843 /// CouldNotCompute 4844 ScalarEvolution::BackedgeTakenInfo 4845 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 4846 // Loops that look like: while (X == 0) are very strange indeed. We don't 4847 // handle them yet except for the trivial case. This could be expanded in the 4848 // future as needed. 4849 4850 // If the value is a constant, check to see if it is known to be non-zero 4851 // already. If so, the backedge will execute zero times. 4852 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4853 if (!C->getValue()->isNullValue()) 4854 return getConstant(C->getType(), 0); 4855 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4856 } 4857 4858 // We could implement others, but I really doubt anyone writes loops like 4859 // this, and if they did, they would already be constant folded. 4860 return getCouldNotCompute(); 4861 } 4862 4863 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 4864 /// (which may not be an immediate predecessor) which has exactly one 4865 /// successor from which BB is reachable, or null if no such block is 4866 /// found. 
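/// The result is a (block, successor) pair; the second member appears to be
/// the successor of the first through which BB is reached, and a null first
/// member means no such block was found.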
4867 /// 4868 std::pair<BasicBlock *, BasicBlock *> 4869 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 4870 // If the block has a unique predecessor, then there is no path from the 4871 // predecessor to the block that does not go through the direct edge 4872 // from the predecessor to the block. 4873 if (BasicBlock *Pred = BB->getSinglePredecessor()) 4874 return std::make_pair(Pred, BB); 4875 4876 // A loop's header is defined to be a block that dominates the loop. 4877 // If the header has a unique predecessor outside the loop, it must be 4878 // a block that has exactly one successor that can reach the loop. 4879 if (Loop *L = LI->getLoopFor(BB)) 4880 return std::make_pair(L->getLoopPredecessor(), L->getHeader()); 4881 4882 return std::pair<BasicBlock *, BasicBlock *>(); 4883 } 4884 4885 /// HasSameValue - SCEV structural equivalence is usually sufficient for 4886 /// testing whether two expressions are equal, however for the purposes of 4887 /// looking for a condition guarding a loop, it can be useful to be a little 4888 /// more general, since a front-end may have replicated the controlling 4889 /// expression. 4890 /// 4891 static bool HasSameValue(const SCEV *A, const SCEV *B) { 4892 // Quick check to see if they are the same SCEV. 4893 if (A == B) return true; 4894 4895 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 4896 // two different instructions with the same value. Check for this case. 4897 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 4898 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 4899 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 4900 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 4901 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) 4902 return true; 4903 4904 // Otherwise assume they may have a different value. 4905 return false; 4906 } 4907 4908 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with 4909 /// predicate Pred. Return true iff any changes were made. 4910 /// 4911 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred, 4912 const SCEV *&LHS, const SCEV *&RHS) { 4913 bool Changed = false; 4914 4915 // Canonicalize a constant to the right side. 4916 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 4917 // Check for both operands constant. 4918 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 4919 if (ConstantExpr::getICmp(Pred, 4920 LHSC->getValue(), 4921 RHSC->getValue())->isNullValue()) 4922 goto trivially_false; 4923 else 4924 goto trivially_true; 4925 } 4926 // Otherwise swap the operands to put the constant on the right. 4927 std::swap(LHS, RHS); 4928 Pred = ICmpInst::getSwappedPredicate(Pred); 4929 Changed = true; 4930 } 4931 4932 // If we're comparing an addrec with a value which is loop-invariant in the 4933 // addrec's loop, put the addrec on the left. Also make a dominance check, 4934 // as both operands could be addrecs loop-invariant in each other's loop. 4935 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { 4936 const Loop *L = AR->getLoop(); 4937 if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) { 4938 std::swap(LHS, RHS); 4939 Pred = ICmpInst::getSwappedPredicate(Pred); 4940 Changed = true; 4941 } 4942 } 4943 4944 // If there's a constant operand, canonicalize comparisons with boundary 4945 // cases, and canonicalize *-or-equal comparisons to regular comparisons. 
4946 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 4947 const APInt &RA = RC->getValue()->getValue(); 4948 switch (Pred) { 4949 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 4950 case ICmpInst::ICMP_EQ: 4951 case ICmpInst::ICMP_NE: 4952 break; 4953 case ICmpInst::ICMP_UGE: 4954 if ((RA - 1).isMinValue()) { 4955 Pred = ICmpInst::ICMP_NE; 4956 RHS = getConstant(RA - 1); 4957 Changed = true; 4958 break; 4959 } 4960 if (RA.isMaxValue()) { 4961 Pred = ICmpInst::ICMP_EQ; 4962 Changed = true; 4963 break; 4964 } 4965 if (RA.isMinValue()) goto trivially_true; 4966 4967 Pred = ICmpInst::ICMP_UGT; 4968 RHS = getConstant(RA - 1); 4969 Changed = true; 4970 break; 4971 case ICmpInst::ICMP_ULE: 4972 if ((RA + 1).isMaxValue()) { 4973 Pred = ICmpInst::ICMP_NE; 4974 RHS = getConstant(RA + 1); 4975 Changed = true; 4976 break; 4977 } 4978 if (RA.isMinValue()) { 4979 Pred = ICmpInst::ICMP_EQ; 4980 Changed = true; 4981 break; 4982 } 4983 if (RA.isMaxValue()) goto trivially_true; 4984 4985 Pred = ICmpInst::ICMP_ULT; 4986 RHS = getConstant(RA + 1); 4987 Changed = true; 4988 break; 4989 case ICmpInst::ICMP_SGE: 4990 if ((RA - 1).isMinSignedValue()) { 4991 Pred = ICmpInst::ICMP_NE; 4992 RHS = getConstant(RA - 1); 4993 Changed = true; 4994 break; 4995 } 4996 if (RA.isMaxSignedValue()) { 4997 Pred = ICmpInst::ICMP_EQ; 4998 Changed = true; 4999 break; 5000 } 5001 if (RA.isMinSignedValue()) goto trivially_true; 5002 5003 Pred = ICmpInst::ICMP_SGT; 5004 RHS = getConstant(RA - 1); 5005 Changed = true; 5006 break; 5007 case ICmpInst::ICMP_SLE: 5008 if ((RA + 1).isMaxSignedValue()) { 5009 Pred = ICmpInst::ICMP_NE; 5010 RHS = getConstant(RA + 1); 5011 Changed = true; 5012 break; 5013 } 5014 if (RA.isMinSignedValue()) { 5015 Pred = ICmpInst::ICMP_EQ; 5016 Changed = true; 5017 break; 5018 } 5019 if (RA.isMaxSignedValue()) goto trivially_true; 5020 5021 Pred = ICmpInst::ICMP_SLT; 5022 RHS = getConstant(RA + 1); 5023 Changed = true; 5024 break; 5025 case ICmpInst::ICMP_UGT: 5026 if (RA.isMinValue()) { 5027 Pred = ICmpInst::ICMP_NE; 5028 Changed = true; 5029 break; 5030 } 5031 if ((RA + 1).isMaxValue()) { 5032 Pred = ICmpInst::ICMP_EQ; 5033 RHS = getConstant(RA + 1); 5034 Changed = true; 5035 break; 5036 } 5037 if (RA.isMaxValue()) goto trivially_false; 5038 break; 5039 case ICmpInst::ICMP_ULT: 5040 if (RA.isMaxValue()) { 5041 Pred = ICmpInst::ICMP_NE; 5042 Changed = true; 5043 break; 5044 } 5045 if ((RA - 1).isMinValue()) { 5046 Pred = ICmpInst::ICMP_EQ; 5047 RHS = getConstant(RA - 1); 5048 Changed = true; 5049 break; 5050 } 5051 if (RA.isMinValue()) goto trivially_false; 5052 break; 5053 case ICmpInst::ICMP_SGT: 5054 if (RA.isMinSignedValue()) { 5055 Pred = ICmpInst::ICMP_NE; 5056 Changed = true; 5057 break; 5058 } 5059 if ((RA + 1).isMaxSignedValue()) { 5060 Pred = ICmpInst::ICMP_EQ; 5061 RHS = getConstant(RA + 1); 5062 Changed = true; 5063 break; 5064 } 5065 if (RA.isMaxSignedValue()) goto trivially_false; 5066 break; 5067 case ICmpInst::ICMP_SLT: 5068 if (RA.isMaxSignedValue()) { 5069 Pred = ICmpInst::ICMP_NE; 5070 Changed = true; 5071 break; 5072 } 5073 if ((RA - 1).isMinSignedValue()) { 5074 Pred = ICmpInst::ICMP_EQ; 5075 RHS = getConstant(RA - 1); 5076 Changed = true; 5077 break; 5078 } 5079 if (RA.isMinSignedValue()) goto trivially_false; 5080 break; 5081 } 5082 } 5083 5084 // Check for obvious equality. 
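  // For example, 'x ule x' folds to true here and 'x ult x' folds to false,
  // via the trivially_true/trivially_false paths below.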
5085 if (HasSameValue(LHS, RHS)) { 5086 if (ICmpInst::isTrueWhenEqual(Pred)) 5087 goto trivially_true; 5088 if (ICmpInst::isFalseWhenEqual(Pred)) 5089 goto trivially_false; 5090 } 5091 5092 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by 5093 // adding or subtracting 1 from one of the operands. 5094 switch (Pred) { 5095 case ICmpInst::ICMP_SLE: 5096 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) { 5097 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5098 /*HasNUW=*/false, /*HasNSW=*/true); 5099 Pred = ICmpInst::ICMP_SLT; 5100 Changed = true; 5101 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) { 5102 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5103 /*HasNUW=*/false, /*HasNSW=*/true); 5104 Pred = ICmpInst::ICMP_SLT; 5105 Changed = true; 5106 } 5107 break; 5108 case ICmpInst::ICMP_SGE: 5109 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) { 5110 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5111 /*HasNUW=*/false, /*HasNSW=*/true); 5112 Pred = ICmpInst::ICMP_SGT; 5113 Changed = true; 5114 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) { 5115 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5116 /*HasNUW=*/false, /*HasNSW=*/true); 5117 Pred = ICmpInst::ICMP_SGT; 5118 Changed = true; 5119 } 5120 break; 5121 case ICmpInst::ICMP_ULE: 5122 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) { 5123 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS, 5124 /*HasNUW=*/true, /*HasNSW=*/false); 5125 Pred = ICmpInst::ICMP_ULT; 5126 Changed = true; 5127 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) { 5128 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS, 5129 /*HasNUW=*/true, /*HasNSW=*/false); 5130 Pred = ICmpInst::ICMP_ULT; 5131 Changed = true; 5132 } 5133 break; 5134 case ICmpInst::ICMP_UGE: 5135 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) { 5136 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS, 5137 /*HasNUW=*/true, /*HasNSW=*/false); 5138 Pred = ICmpInst::ICMP_UGT; 5139 Changed = true; 5140 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) { 5141 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS, 5142 /*HasNUW=*/true, /*HasNSW=*/false); 5143 Pred = ICmpInst::ICMP_UGT; 5144 Changed = true; 5145 } 5146 break; 5147 default: 5148 break; 5149 } 5150 5151 // TODO: More simplifications are possible here. 5152 5153 return Changed; 5154 5155 trivially_true: 5156 // Return 0 == 0. 5157 LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); 5158 Pred = ICmpInst::ICMP_EQ; 5159 return true; 5160 5161 trivially_false: 5162 // Return 0 != 0. 
5163 LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0); 5164 Pred = ICmpInst::ICMP_NE; 5165 return true; 5166 } 5167 5168 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 5169 return getSignedRange(S).getSignedMax().isNegative(); 5170 } 5171 5172 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 5173 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 5174 } 5175 5176 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 5177 return !getSignedRange(S).getSignedMin().isNegative(); 5178 } 5179 5180 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 5181 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 5182 } 5183 5184 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 5185 return isKnownNegative(S) || isKnownPositive(S); 5186 } 5187 5188 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 5189 const SCEV *LHS, const SCEV *RHS) { 5190 // Canonicalize the inputs first. 5191 (void)SimplifyICmpOperands(Pred, LHS, RHS); 5192 5193 // If LHS or RHS is an addrec, check to see if the condition is true in 5194 // every iteration of the loop. 5195 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 5196 if (isLoopEntryGuardedByCond( 5197 AR->getLoop(), Pred, AR->getStart(), RHS) && 5198 isLoopBackedgeGuardedByCond( 5199 AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS)) 5200 return true; 5201 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) 5202 if (isLoopEntryGuardedByCond( 5203 AR->getLoop(), Pred, LHS, AR->getStart()) && 5204 isLoopBackedgeGuardedByCond( 5205 AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this))) 5206 return true; 5207 5208 // Otherwise see what can be done with known constant ranges. 5209 return isKnownPredicateWithRanges(Pred, LHS, RHS); 5210 } 5211 5212 bool 5213 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred, 5214 const SCEV *LHS, const SCEV *RHS) { 5215 if (HasSameValue(LHS, RHS)) 5216 return ICmpInst::isTrueWhenEqual(Pred); 5217 5218 // This code is split out from isKnownPredicate because it is called from 5219 // within isLoopEntryGuardedByCond. 
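  // The cases below reason purely from constant ranges. For example, if LHS is
  // known to lie in the unsigned range [0, 10) and RHS in [10, 20), then
  // 'LHS u< RHS' is known to be true.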
5220 switch (Pred) { 5221 default: 5222 llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5223 break; 5224 case ICmpInst::ICMP_SGT: 5225 Pred = ICmpInst::ICMP_SLT; 5226 std::swap(LHS, RHS); 5227 case ICmpInst::ICMP_SLT: { 5228 ConstantRange LHSRange = getSignedRange(LHS); 5229 ConstantRange RHSRange = getSignedRange(RHS); 5230 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) 5231 return true; 5232 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) 5233 return false; 5234 break; 5235 } 5236 case ICmpInst::ICMP_SGE: 5237 Pred = ICmpInst::ICMP_SLE; 5238 std::swap(LHS, RHS); 5239 case ICmpInst::ICMP_SLE: { 5240 ConstantRange LHSRange = getSignedRange(LHS); 5241 ConstantRange RHSRange = getSignedRange(RHS); 5242 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) 5243 return true; 5244 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) 5245 return false; 5246 break; 5247 } 5248 case ICmpInst::ICMP_UGT: 5249 Pred = ICmpInst::ICMP_ULT; 5250 std::swap(LHS, RHS); 5251 case ICmpInst::ICMP_ULT: { 5252 ConstantRange LHSRange = getUnsignedRange(LHS); 5253 ConstantRange RHSRange = getUnsignedRange(RHS); 5254 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) 5255 return true; 5256 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) 5257 return false; 5258 break; 5259 } 5260 case ICmpInst::ICMP_UGE: 5261 Pred = ICmpInst::ICMP_ULE; 5262 std::swap(LHS, RHS); 5263 case ICmpInst::ICMP_ULE: { 5264 ConstantRange LHSRange = getUnsignedRange(LHS); 5265 ConstantRange RHSRange = getUnsignedRange(RHS); 5266 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) 5267 return true; 5268 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) 5269 return false; 5270 break; 5271 } 5272 case ICmpInst::ICMP_NE: { 5273 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) 5274 return true; 5275 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet()) 5276 return true; 5277 5278 const SCEV *Diff = getMinusSCEV(LHS, RHS); 5279 if (isKnownNonZero(Diff)) 5280 return true; 5281 break; 5282 } 5283 case ICmpInst::ICMP_EQ: 5284 // The check at the top of the function catches the case where 5285 // the values are known to be equal. 5286 break; 5287 } 5288 return false; 5289 } 5290 5291 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is 5292 /// protected by a conditional between LHS and RHS. This is used to 5293 /// to eliminate casts. 5294 bool 5295 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, 5296 ICmpInst::Predicate Pred, 5297 const SCEV *LHS, const SCEV *RHS) { 5298 // Interpret a null as meaning no loop, where there is obviously no guard 5299 // (interprocedural conditions notwithstanding). 5300 if (!L) return true; 5301 5302 BasicBlock *Latch = L->getLoopLatch(); 5303 if (!Latch) 5304 return false; 5305 5306 BranchInst *LoopContinuePredicate = 5307 dyn_cast<BranchInst>(Latch->getTerminator()); 5308 if (!LoopContinuePredicate || 5309 LoopContinuePredicate->isUnconditional()) 5310 return false; 5311 5312 return isImpliedCond(Pred, LHS, RHS, 5313 LoopContinuePredicate->getCondition(), 5314 LoopContinuePredicate->getSuccessor(0) != L->getHeader()); 5315 } 5316 5317 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected 5318 /// by a conditional between LHS and RHS. This is used to help avoid max 5319 /// expressions in loop trip counts, and to eliminate casts. 
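/// For example, for a loop reached only through 'if (n > 0)', the entry is
/// guarded by 'n > 0', which lets the trip-count logic use n directly as the
/// exit bound instead of a smax/umax of n and the start value.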
5320 bool 5321 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, 5322 ICmpInst::Predicate Pred, 5323 const SCEV *LHS, const SCEV *RHS) { 5324 // Interpret a null as meaning no loop, where there is obviously no guard 5325 // (interprocedural conditions notwithstanding). 5326 if (!L) return false; 5327 5328 // Starting at the loop predecessor, climb up the predecessor chain, as long 5329 // as there are predecessors that can be found that have unique successors 5330 // leading to the original header. 5331 for (std::pair<BasicBlock *, BasicBlock *> 5332 Pair(L->getLoopPredecessor(), L->getHeader()); 5333 Pair.first; 5334 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) { 5335 5336 BranchInst *LoopEntryPredicate = 5337 dyn_cast<BranchInst>(Pair.first->getTerminator()); 5338 if (!LoopEntryPredicate || 5339 LoopEntryPredicate->isUnconditional()) 5340 continue; 5341 5342 if (isImpliedCond(Pred, LHS, RHS, 5343 LoopEntryPredicate->getCondition(), 5344 LoopEntryPredicate->getSuccessor(0) != Pair.second)) 5345 return true; 5346 } 5347 5348 return false; 5349 } 5350 5351 /// isImpliedCond - Test whether the condition described by Pred, LHS, 5352 /// and RHS is true whenever the given Cond value evaluates to true. 5353 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, 5354 const SCEV *LHS, const SCEV *RHS, 5355 Value *FoundCondValue, 5356 bool Inverse) { 5357 // Recursively handle And and Or conditions. 5358 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { 5359 if (BO->getOpcode() == Instruction::And) { 5360 if (!Inverse) 5361 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 5362 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 5363 } else if (BO->getOpcode() == Instruction::Or) { 5364 if (Inverse) 5365 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || 5366 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse); 5367 } 5368 } 5369 5370 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); 5371 if (!ICI) return false; 5372 5373 // Bail if the ICmp's operands' types are wider than the needed type 5374 // before attempting to call getSCEV on them. This avoids infinite 5375 // recursion, since the analysis of widening casts can require loop 5376 // exit condition information for overflow checking, which would 5377 // lead back here. 5378 if (getTypeSizeInBits(LHS->getType()) < 5379 getTypeSizeInBits(ICI->getOperand(0)->getType())) 5380 return false; 5381 5382 // Now that we found a conditional branch that dominates the loop, check to 5383 // see if it is the comparison we are looking for. 5384 ICmpInst::Predicate FoundPred; 5385 if (Inverse) 5386 FoundPred = ICI->getInversePredicate(); 5387 else 5388 FoundPred = ICI->getPredicate(); 5389 5390 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 5391 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 5392 5393 // Balance the types. The case where FoundLHS' type is wider than 5394 // LHS' type is checked for above. 5395 if (getTypeSizeInBits(LHS->getType()) > 5396 getTypeSizeInBits(FoundLHS->getType())) { 5397 if (CmpInst::isSigned(Pred)) { 5398 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 5399 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 5400 } else { 5401 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 5402 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 5403 } 5404 } 5405 5406 // Canonicalize the query to match the way instcombine will have 5407 // canonicalized the comparison. 
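  // For example, both 'x sle 5' and 'x slt 6' canonicalize to the same
  // predicate and operands, so the query and the found condition can then be
  // matched structurally.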
5408 if (SimplifyICmpOperands(Pred, LHS, RHS)) 5409 if (LHS == RHS) 5410 return CmpInst::isTrueWhenEqual(Pred); 5411 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) 5412 if (FoundLHS == FoundRHS) 5413 return CmpInst::isFalseWhenEqual(Pred); 5414 5415 // Check to see if we can make the LHS or RHS match. 5416 if (LHS == FoundRHS || RHS == FoundLHS) { 5417 if (isa<SCEVConstant>(RHS)) { 5418 std::swap(FoundLHS, FoundRHS); 5419 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 5420 } else { 5421 std::swap(LHS, RHS); 5422 Pred = ICmpInst::getSwappedPredicate(Pred); 5423 } 5424 } 5425 5426 // Check whether the found predicate is the same as the desired predicate. 5427 if (FoundPred == Pred) 5428 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 5429 5430 // Check whether swapping the found predicate makes it the same as the 5431 // desired predicate. 5432 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) { 5433 if (isa<SCEVConstant>(RHS)) 5434 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS); 5435 else 5436 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), 5437 RHS, LHS, FoundLHS, FoundRHS); 5438 } 5439 5440 // Check whether the actual condition is beyond sufficient. 5441 if (FoundPred == ICmpInst::ICMP_EQ) 5442 if (ICmpInst::isTrueWhenEqual(Pred)) 5443 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS)) 5444 return true; 5445 if (Pred == ICmpInst::ICMP_NE) 5446 if (!ICmpInst::isTrueWhenEqual(FoundPred)) 5447 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS)) 5448 return true; 5449 5450 // Otherwise assume the worst. 5451 return false; 5452 } 5453 5454 /// isImpliedCondOperands - Test whether the condition described by Pred, 5455 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS, 5456 /// and FoundRHS is true. 5457 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred, 5458 const SCEV *LHS, const SCEV *RHS, 5459 const SCEV *FoundLHS, 5460 const SCEV *FoundRHS) { 5461 return isImpliedCondOperandsHelper(Pred, LHS, RHS, 5462 FoundLHS, FoundRHS) || 5463 // ~x < ~y --> x > y 5464 isImpliedCondOperandsHelper(Pred, LHS, RHS, 5465 getNotSCEV(FoundRHS), 5466 getNotSCEV(FoundLHS)); 5467 } 5468 5469 /// isImpliedCondOperandsHelper - Test whether the condition described by 5470 /// Pred, LHS, and RHS is true whenever the condition described by Pred, 5471 /// FoundLHS, and FoundRHS is true. 
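/// For example, with an unsigned '<' query, 'FoundLHS u< FoundRHS' implies
/// 'LHS u< RHS' whenever 'LHS u<= FoundLHS' and 'RHS u>= FoundRHS' are both
/// known from the operands' constant ranges.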
5472 bool 5473 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, 5474 const SCEV *LHS, const SCEV *RHS, 5475 const SCEV *FoundLHS, 5476 const SCEV *FoundRHS) { 5477 switch (Pred) { 5478 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 5479 case ICmpInst::ICMP_EQ: 5480 case ICmpInst::ICMP_NE: 5481 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS)) 5482 return true; 5483 break; 5484 case ICmpInst::ICMP_SLT: 5485 case ICmpInst::ICMP_SLE: 5486 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) && 5487 isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS)) 5488 return true; 5489 break; 5490 case ICmpInst::ICMP_SGT: 5491 case ICmpInst::ICMP_SGE: 5492 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) && 5493 isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS)) 5494 return true; 5495 break; 5496 case ICmpInst::ICMP_ULT: 5497 case ICmpInst::ICMP_ULE: 5498 if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) && 5499 isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS)) 5500 return true; 5501 break; 5502 case ICmpInst::ICMP_UGT: 5503 case ICmpInst::ICMP_UGE: 5504 if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) && 5505 isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS)) 5506 return true; 5507 break; 5508 } 5509 5510 return false; 5511 } 5512 5513 /// getBECount - Subtract the end and start values and divide by the step, 5514 /// rounding up, to get the number of times the backedge is executed. Return 5515 /// CouldNotCompute if an intermediate computation overflows. 5516 const SCEV *ScalarEvolution::getBECount(const SCEV *Start, 5517 const SCEV *End, 5518 const SCEV *Step, 5519 bool NoWrap) { 5520 assert(!isKnownNegative(Step) && 5521 "This code doesn't handle negative strides yet!"); 5522 5523 const Type *Ty = Start->getType(); 5524 const SCEV *NegOne = getConstant(Ty, (uint64_t)-1); 5525 const SCEV *Diff = getMinusSCEV(End, Start); 5526 const SCEV *RoundUp = getAddExpr(Step, NegOne); 5527 5528 // Add an adjustment to the difference between End and Start so that 5529 // the division will effectively round up. 5530 const SCEV *Add = getAddExpr(Diff, RoundUp); 5531 5532 if (!NoWrap) { 5533 // Check Add for unsigned overflow. 5534 // TODO: More sophisticated things could be done here. 5535 const Type *WideTy = IntegerType::get(getContext(), 5536 getTypeSizeInBits(Ty) + 1); 5537 const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy); 5538 const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy); 5539 const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp); 5540 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd) 5541 return getCouldNotCompute(); 5542 } 5543 5544 return getUDivExpr(Add, Step); 5545 } 5546 5547 /// HowManyLessThans - Return the number of times a backedge containing the 5548 /// specified less-than comparison will execute. If not computable, return 5549 /// CouldNotCompute. 5550 ScalarEvolution::BackedgeTakenInfo 5551 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS, 5552 const Loop *L, bool isSigned) { 5553 // Only handle: "ADDREC < LoopInvariant". 5554 if (!RHS->isLoopInvariant(L)) return getCouldNotCompute(); 5555 5556 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS); 5557 if (!AddRec || AddRec->getLoop() != L) 5558 return getCouldNotCompute(); 5559 5560 // Check to see if we have a flag which makes analysis easy. 5561 bool NoWrap = isSigned ? 
AddRec->hasNoSignedWrap() : 5562 AddRec->hasNoUnsignedWrap(); 5563 5564 if (AddRec->isAffine()) { 5565 unsigned BitWidth = getTypeSizeInBits(AddRec->getType()); 5566 const SCEV *Step = AddRec->getStepRecurrence(*this); 5567 5568 if (Step->isZero()) 5569 return getCouldNotCompute(); 5570 if (Step->isOne()) { 5571 // With unit stride, the iteration never steps past the limit value. 5572 } else if (isKnownPositive(Step)) { 5573 // Test whether a positive iteration can step past the limit 5574 // value and past the maximum value for its type in a single step. 5575 // Note that it's not sufficient to check NoWrap here, because even 5576 // though the value after a wrap is undefined, it's not undefined 5577 // behavior, so if wrap does occur, the loop could either terminate or 5578 // loop infinitely, but in either case, the loop is guaranteed to 5579 // iterate at least until the iteration where the wrapping occurs. 5580 const SCEV *One = getConstant(Step->getType(), 1); 5581 if (isSigned) { 5582 APInt Max = APInt::getSignedMaxValue(BitWidth); 5583 if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax()) 5584 .slt(getSignedRange(RHS).getSignedMax())) 5585 return getCouldNotCompute(); 5586 } else { 5587 APInt Max = APInt::getMaxValue(BitWidth); 5588 if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax()) 5589 .ult(getUnsignedRange(RHS).getUnsignedMax())) 5590 return getCouldNotCompute(); 5591 } 5592 } else 5593 // TODO: Handle negative strides here and below. 5594 return getCouldNotCompute(); 5595 5596 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant 5597 // m. So, we count the number of iterations in which {n,+,s} < m is true. 5598 // Note that we cannot simply return max(m-n,0)/s because it's not safe to 5599 // treat m-n as signed nor unsigned due to overflow possibility. 5600 5601 // First, we get the value of the LHS in the first iteration: n 5602 const SCEV *Start = AddRec->getOperand(0); 5603 5604 // Determine the minimum constant start value. 5605 const SCEV *MinStart = getConstant(isSigned ? 5606 getSignedRange(Start).getSignedMin() : 5607 getUnsignedRange(Start).getUnsignedMin()); 5608 5609 // If we know that the condition is true in order to enter the loop, 5610 // then we know that it will run exactly (m-n)/s times. Otherwise, we 5611 // only know that it will execute (max(m,n)-n)/s times. In both cases, 5612 // the division must round up. 5613 const SCEV *End = RHS; 5614 if (!isLoopEntryGuardedByCond(L, 5615 isSigned ? ICmpInst::ICMP_SLT : 5616 ICmpInst::ICMP_ULT, 5617 getMinusSCEV(Start, Step), RHS)) 5618 End = isSigned ? getSMaxExpr(RHS, Start) 5619 : getUMaxExpr(RHS, Start); 5620 5621 // Determine the maximum constant end value. 5622 const SCEV *MaxEnd = getConstant(isSigned ? 5623 getSignedRange(End).getSignedMax() : 5624 getUnsignedRange(End).getUnsignedMax()); 5625 5626 // If MaxEnd is within a step of the maximum integer value in its type, 5627 // adjust it down to the minimum value which would produce the same effect. 5628 // This allows the subsequent ceiling division of (N+(step-1))/step to 5629 // compute the correct value. 5630 const SCEV *StepMinusOne = getMinusSCEV(Step, 5631 getConstant(Step->getType(), 1)); 5632 MaxEnd = isSigned ? 
5633 getSMinExpr(MaxEnd, 5634 getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)), 5635 StepMinusOne)) : 5636 getUMinExpr(MaxEnd, 5637 getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)), 5638 StepMinusOne)); 5639 5640 // Finally, we subtract these two values and divide, rounding up, to get 5641 // the number of times the backedge is executed. 5642 const SCEV *BECount = getBECount(Start, End, Step, NoWrap); 5643 5644 // The maximum backedge count is similar, except using the minimum start 5645 // value and the maximum end value. 5646 const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap); 5647 5648 return BackedgeTakenInfo(BECount, MaxBECount); 5649 } 5650 5651 return getCouldNotCompute(); 5652 } 5653 5654 /// getNumIterationsInRange - Return the number of iterations of this loop that 5655 /// produce values in the specified constant range. Another way of looking at 5656 /// this is that it returns the first iteration number where the value is not in 5657 /// the condition, thus computing the exit count. If the iteration count can't 5658 /// be computed, an instance of SCEVCouldNotCompute is returned. 5659 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range, 5660 ScalarEvolution &SE) const { 5661 if (Range.isFullSet()) // Infinite loop. 5662 return SE.getCouldNotCompute(); 5663 5664 // If the start is a non-zero constant, shift the range to simplify things. 5665 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 5666 if (!SC->getValue()->isZero()) { 5667 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 5668 Operands[0] = SE.getConstant(SC->getType(), 0); 5669 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); 5670 if (const SCEVAddRecExpr *ShiftedAddRec = 5671 dyn_cast<SCEVAddRecExpr>(Shifted)) 5672 return ShiftedAddRec->getNumIterationsInRange( 5673 Range.subtract(SC->getValue()->getValue()), SE); 5674 // This is strange and shouldn't happen. 5675 return SE.getCouldNotCompute(); 5676 } 5677 5678 // The only time we can solve this is when we have all constant indices. 5679 // Otherwise, we cannot determine the overflow conditions. 5680 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 5681 if (!isa<SCEVConstant>(getOperand(i))) 5682 return SE.getCouldNotCompute(); 5683 5684 5685 // Okay at this point we know that all elements of the chrec are constants and 5686 // that the start element is zero. 5687 5688 // First check to see if the range contains zero. If not, the first 5689 // iteration exits. 5690 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 5691 if (!Range.contains(APInt(BitWidth, 0))) 5692 return SE.getConstant(getType(), 0); 5693 5694 if (isAffine()) { 5695 // If this is an affine expression then we have this situation: 5696 // Solve {0,+,A} in Range === Ax in Range 5697 5698 // We know that zero is in the range. If A is positive then we know that 5699 // the upper value of the range must be the first possible exit value. 5700 // If A is negative then the lower of the range is the last possible loop 5701 // value. Also note that we already checked for a full range. 5702 APInt One(BitWidth,1); 5703 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); 5704 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 5705 5706 // The exit value should be (End+A)/A. 5707 APInt ExitVal = (End + A).udiv(A); 5708 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 5709 5710 // Evaluate at the exit value. 
If we really did fall out of the valid 5711 // range, then we computed our trip count, otherwise wrap around or other 5712 // things must have happened. 5713 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 5714 if (Range.contains(Val->getValue())) 5715 return SE.getCouldNotCompute(); // Something strange happened 5716 5717 // Ensure that the previous value is in the range. This is a sanity check. 5718 assert(Range.contains( 5719 EvaluateConstantChrecAtConstant(this, 5720 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && 5721 "Linear scev computation is off in a bad way!"); 5722 return SE.getConstant(ExitValue); 5723 } else if (isQuadratic()) { 5724 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 5725 // quadratic equation to solve it. To do this, we must frame our problem in 5726 // terms of figuring out when zero is crossed, instead of when 5727 // Range.getUpper() is crossed. 5728 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 5729 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 5730 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); 5731 5732 // Next, solve the constructed addrec 5733 std::pair<const SCEV *,const SCEV *> Roots = 5734 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); 5735 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 5736 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 5737 if (R1) { 5738 // Pick the smallest positive root value. 5739 if (ConstantInt *CB = 5740 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 5741 R1->getValue(), R2->getValue()))) { 5742 if (CB->getZExtValue() == false) 5743 std::swap(R1, R2); // R1 is the minimum root now. 5744 5745 // Make sure the root is not off by one. The returned iteration should 5746 // not be in the range, but the previous one should be. When solving 5747 // for "X*X < 5", for example, we should not return a root of 2. 5748 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, 5749 R1->getValue(), 5750 SE); 5751 if (Range.contains(R1Val->getValue())) { 5752 // The next iteration must be out of the range... 5753 ConstantInt *NextVal = 5754 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1); 5755 5756 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5757 if (!Range.contains(R1Val->getValue())) 5758 return SE.getConstant(NextVal); 5759 return SE.getCouldNotCompute(); // Something strange happened 5760 } 5761 5762 // If R1 was not in the range, then it is a good return value. Make 5763 // sure that R1-1 WAS in the range though, just in case. 5764 ConstantInt *NextVal = 5765 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1); 5766 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5767 if (Range.contains(R1Val->getValue())) 5768 return R1; 5769 return SE.getCouldNotCompute(); // Something strange happened 5770 } 5771 } 5772 } 5773 5774 return SE.getCouldNotCompute(); 5775 } 5776 5777 5778 5779 //===----------------------------------------------------------------------===// 5780 // SCEVCallbackVH Class Implementation 5781 //===----------------------------------------------------------------------===// 5782 5783 void ScalarEvolution::SCEVCallbackVH::deleted() { 5784 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5785 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 5786 SE->ConstantEvolutionLoopExitValue.erase(PN); 5787 SE->ValueExprMap.erase(getValPtr()); 5788 // this now dangles! 
5789 } 5790 5791 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) { 5792 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5793 5794 // Forget all the expressions associated with users of the old value, 5795 // so that future queries will recompute the expressions using the new 5796 // value. 5797 Value *Old = getValPtr(); 5798 SmallVector<User *, 16> Worklist; 5799 SmallPtrSet<User *, 8> Visited; 5800 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end(); 5801 UI != UE; ++UI) 5802 Worklist.push_back(*UI); 5803 while (!Worklist.empty()) { 5804 User *U = Worklist.pop_back_val(); 5805 // Deleting the Old value will cause this to dangle. Postpone 5806 // that until everything else is done. 5807 if (U == Old) 5808 continue; 5809 if (!Visited.insert(U)) 5810 continue; 5811 if (PHINode *PN = dyn_cast<PHINode>(U)) 5812 SE->ConstantEvolutionLoopExitValue.erase(PN); 5813 SE->ValueExprMap.erase(U); 5814 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end(); 5815 UI != UE; ++UI) 5816 Worklist.push_back(*UI); 5817 } 5818 // Delete the Old value. 5819 if (PHINode *PN = dyn_cast<PHINode>(Old)) 5820 SE->ConstantEvolutionLoopExitValue.erase(PN); 5821 SE->ValueExprMap.erase(Old); 5822 // this now dangles! 5823 } 5824 5825 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) 5826 : CallbackVH(V), SE(se) {} 5827 5828 //===----------------------------------------------------------------------===// 5829 // ScalarEvolution Class Implementation 5830 //===----------------------------------------------------------------------===// 5831 5832 ScalarEvolution::ScalarEvolution() 5833 : FunctionPass(ID), FirstUnknown(0) { 5834 } 5835 5836 bool ScalarEvolution::runOnFunction(Function &F) { 5837 this->F = &F; 5838 LI = &getAnalysis<LoopInfo>(); 5839 TD = getAnalysisIfAvailable<TargetData>(); 5840 DT = &getAnalysis<DominatorTree>(); 5841 return false; 5842 } 5843 5844 void ScalarEvolution::releaseMemory() { 5845 // Iterate through all the SCEVUnknown instances and call their 5846 // destructors, so that they release their references to their values. 5847 for (SCEVUnknown *U = FirstUnknown; U; U = U->Next) 5848 U->~SCEVUnknown(); 5849 FirstUnknown = 0; 5850 5851 ValueExprMap.clear(); 5852 BackedgeTakenCounts.clear(); 5853 ConstantEvolutionLoopExitValue.clear(); 5854 ValuesAtScopes.clear(); 5855 UniqueSCEVs.clear(); 5856 SCEVAllocator.Reset(); 5857 } 5858 5859 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { 5860 AU.setPreservesAll(); 5861 AU.addRequiredTransitive<LoopInfo>(); 5862 AU.addRequiredTransitive<DominatorTree>(); 5863 } 5864 5865 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { 5866 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L)); 5867 } 5868 5869 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, 5870 const Loop *L) { 5871 // Print all inner loops first 5872 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I) 5873 PrintLoopInfo(OS, SE, *I); 5874 5875 OS << "Loop "; 5876 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); 5877 OS << ": "; 5878 5879 SmallVector<BasicBlock *, 8> ExitBlocks; 5880 L->getExitBlocks(ExitBlocks); 5881 if (ExitBlocks.size() != 1) 5882 OS << "<multiple exits> "; 5883 5884 if (SE->hasLoopInvariantBackedgeTakenCount(L)) { 5885 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L); 5886 } else { 5887 OS << "Unpredictable backedge-taken count. 
"; 5888 } 5889 5890 OS << "\n" 5891 "Loop "; 5892 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false); 5893 OS << ": "; 5894 5895 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) { 5896 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L); 5897 } else { 5898 OS << "Unpredictable max backedge-taken count. "; 5899 } 5900 5901 OS << "\n"; 5902 } 5903 5904 void ScalarEvolution::print(raw_ostream &OS, const Module *) const { 5905 // ScalarEvolution's implementation of the print method is to print 5906 // out SCEV values of all instructions that are interesting. Doing 5907 // this potentially causes it to create new SCEV objects though, 5908 // which technically conflicts with the const qualifier. This isn't 5909 // observable from outside the class though, so casting away the 5910 // const isn't dangerous. 5911 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 5912 5913 OS << "Classifying expressions for: "; 5914 WriteAsOperand(OS, F, /*PrintType=*/false); 5915 OS << "\n"; 5916 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 5917 if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) { 5918 OS << *I << '\n'; 5919 OS << " --> "; 5920 const SCEV *SV = SE.getSCEV(&*I); 5921 SV->print(OS); 5922 5923 const Loop *L = LI->getLoopFor((*I).getParent()); 5924 5925 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 5926 if (AtUse != SV) { 5927 OS << " --> "; 5928 AtUse->print(OS); 5929 } 5930 5931 if (L) { 5932 OS << "\t\t" "Exits: "; 5933 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 5934 if (!ExitValue->isLoopInvariant(L)) { 5935 OS << "<<Unknown>>"; 5936 } else { 5937 OS << *ExitValue; 5938 } 5939 } 5940 5941 OS << "\n"; 5942 } 5943 5944 OS << "Determining loop execution counts for: "; 5945 WriteAsOperand(OS, F, /*PrintType=*/false); 5946 OS << "\n"; 5947 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 5948 PrintLoopInfo(OS, &SE, *I); 5949 } 5950 5951