//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
  llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(
    ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->properlyDominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->properlyDominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I);
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->properlyDominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr)
          if (CE->getOperand(0)->isNullValue()) {
            const Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
            if (CE->getNumOperands() == 2)
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
                if (CI->isOne()) {
                  AllocTy = Ty;
                  return true;
                }
          }

  return false;
}

bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr)
          if (CE->getOperand(0)->isNullValue()) {
            const Type *Ty =
              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
            if (const StructType *STy = dyn_cast<StructType>(Ty))
              if (!STy->isPacked() &&
                  CE->getNumOperands() == 3 &&
                  CE->getOperand(1)->isNullValue()) {
                if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                  if (CI->isOne() &&
                      STy->getNumElements() == 2 &&
                      STy->getElementType(0)->isInteger(1)) {
                    AllocTy = STy->getElementType(1);
                    return true;
                  }
              }
          }

  return false;
}

bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
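          //
          // For reference (illustrative only, not from the original source):
          // the constant recognized here is the classic offsetof idiom over a
          // null pointer, e.g.
          //   ptrtoint (getelementptr ({ i32, double }* null, i32 0, i32 1) to i64)
          // where the getelementptr's last operand names the struct field.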
          if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

void SCEVUnknown::print(raw_ostream &OS) const {
  const Type *AllocTy;
  if (isSizeOf(AllocTy)) {
    OS << "sizeof(" << *AllocTy << ")";
    return;
  }
  if (isAlignOf(AllocTy)) {
    OS << "alignof(" << *AllocTy << ")";
    return;
  }

  const Type *CTy;
  Constant *FieldNo;
  if (isOffsetOf(CTy, FieldNo)) {
    OS << "offsetof(" << *CTy << ", ";
    WriteAsOperand(OS, FieldNo, false);
    OS << ")";
    return;
  }

  // Otherwise just print it normally.
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

static bool CompareTypes(const Type *A, const Type *B) {
  if (A->getTypeID() != B->getTypeID())
    return A->getTypeID() < B->getTypeID();
  if (const IntegerType *AI = dyn_cast<IntegerType>(A)) {
    const IntegerType *BI = cast<IntegerType>(B);
    return AI->getBitWidth() < BI->getBitWidth();
  }
  if (const PointerType *AI = dyn_cast<PointerType>(A)) {
    const PointerType *BI = cast<PointerType>(B);
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const ArrayType *AI = dyn_cast<ArrayType>(A)) {
    const ArrayType *BI = cast<ArrayType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const VectorType *AI = dyn_cast<VectorType>(A)) {
    const VectorType *BI = cast<VectorType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    return CompareTypes(AI->getElementType(), BI->getElementType());
  }
  if (const StructType *AI = dyn_cast<StructType>(A)) {
    const StructType *BI = cast<StructType>(B);
    if (AI->getNumElements() != BI->getNumElements())
      return AI->getNumElements() < BI->getNumElements();
    for (unsigned i = 0, e = AI->getNumElements(); i != e; ++i)
      if (CompareTypes(AI->getElementType(i), BI->getElementType(i)) ||
          CompareTypes(BI->getElementType(i), AI->getElementType(i)))
        return CompareTypes(AI->getElementType(i), BI->getElementType(i));
  }
  return false;
}

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return false;

      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }

        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      llvm_unreachable("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector
/// are consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
///
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely
  // to be extremely short in practice.  Note that we take this approach
  // because we do not want to depend on the addresses of the objects we are
  // grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}



//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assumes K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula requires less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
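  //
  // Worked example (illustrative, not part of the computation): for K == 3
  // and W == 32, OddFactorial == 3 and T == 1, so MultiplyFactor below is the
  // multiplicative inverse of 3 modulo 2^32, i.e. 0xAAAAAAAB; multiplying the
  // truncated quotient by it performs the exact division by 3 at width W.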
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
///
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
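  // For instance (illustrative): truncating the constant i64 300 to i8 yields
  // the i8 constant 44 (300 mod 256); the ConstantExpr fold below handles
  // this directly.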
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                             getZeroExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(getSignExtendExpr(Start, Ty),
                             getSignExtendExpr(Step, Ty),
                             L);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
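      //
      // Illustrative sketch (example values, not from the original source):
      // for {0,+,1}<L> over i8 with MaxBECount == 9, the narrow computation
      // Start + Step*MaxBECount == 9 agrees with the same computation carried
      // out in the doubled width below, which is the evidence used to push
      // the sext into the addrec's operands.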
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          const SCEV *UMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          Add = getAddExpr(Start, UMul);
          OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
                                      getSignedRange(Step).getSignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
              (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, and update the
/// given map.  This is a helper function for getAddExpr.  As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (!isKnownNonNegative(Ops[i])) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
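  // For example (illustrative): after GroupByComplexity the constants sit at
  // the front, so (3 + x + 5) arrives as (3, 5, x) and folds to (8 + x) below.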
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops, HasNUW, HasNSW);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded, e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the
      // operands list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
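  // For example (illustrative): X + (X * Y) becomes X * (Y + 1), and more
  // generally W + X + (X * Y * Z) becomes W + (X * (Y*Z + 1)), as the loop
  // below does.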
1453 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { 1454 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); 1455 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { 1456 const SCEV *MulOpSCEV = Mul->getOperand(MulOp); 1457 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp) 1458 if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) { 1459 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1)) 1460 const SCEV *InnerMul = Mul->getOperand(MulOp == 0); 1461 if (Mul->getNumOperands() != 2) { 1462 // If the multiply has more than two operands, we must get the 1463 // Y*Z term. 1464 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end()); 1465 MulOps.erase(MulOps.begin()+MulOp); 1466 InnerMul = getMulExpr(MulOps); 1467 } 1468 const SCEV *One = getIntegerSCEV(1, Ty); 1469 const SCEV *AddOne = getAddExpr(InnerMul, One); 1470 const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]); 1471 if (Ops.size() == 2) return OuterMul; 1472 if (AddOp < Idx) { 1473 Ops.erase(Ops.begin()+AddOp); 1474 Ops.erase(Ops.begin()+Idx-1); 1475 } else { 1476 Ops.erase(Ops.begin()+Idx); 1477 Ops.erase(Ops.begin()+AddOp-1); 1478 } 1479 Ops.push_back(OuterMul); 1480 return getAddExpr(Ops); 1481 } 1482 1483 // Check this multiply against other multiplies being added together. 1484 for (unsigned OtherMulIdx = Idx+1; 1485 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); 1486 ++OtherMulIdx) { 1487 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); 1488 // If MulOp occurs in OtherMul, we can fold the two multiplies 1489 // together. 1490 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); 1491 OMulOp != e; ++OMulOp) 1492 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) { 1493 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E)) 1494 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0); 1495 if (Mul->getNumOperands() != 2) { 1496 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), 1497 Mul->op_end()); 1498 MulOps.erase(MulOps.begin()+MulOp); 1499 InnerMul1 = getMulExpr(MulOps); 1500 } 1501 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0); 1502 if (OtherMul->getNumOperands() != 2) { 1503 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(), 1504 OtherMul->op_end()); 1505 MulOps.erase(MulOps.begin()+OMulOp); 1506 InnerMul2 = getMulExpr(MulOps); 1507 } 1508 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2); 1509 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum); 1510 if (Ops.size() == 2) return OuterMul; 1511 Ops.erase(Ops.begin()+Idx); 1512 Ops.erase(Ops.begin()+OtherMulIdx-1); 1513 Ops.push_back(OuterMul); 1514 return getAddExpr(Ops); 1515 } 1516 } 1517 } 1518 } 1519 1520 // If there are any add recurrences in the operands list, see if any other 1521 // added values are loop invariant. If so, we can fold them into the 1522 // recurrence. 1523 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1524 ++Idx; 1525 1526 // Scan over all recurrences, trying to fold loop invariants into them. 1527 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1528 // Scan all of the other operands to this add and add them to the vector if 1529 // they are loop invariant w.r.t. the recurrence. 
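// For example, x + {1,+,2} with x invariant in the recurrence's loop can be
// absorbed into the start value, giving {x+1,+,2}.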
1530 SmallVector<const SCEV *, 8> LIOps; 1531 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1532 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1533 if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { 1534 LIOps.push_back(Ops[i]); 1535 Ops.erase(Ops.begin()+i); 1536 --i; --e; 1537 } 1538 1539 // If we found some loop invariants, fold them into the recurrence. 1540 if (!LIOps.empty()) { 1541 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step} 1542 LIOps.push_back(AddRec->getStart()); 1543 1544 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), 1545 AddRec->op_end()); 1546 AddRecOps[0] = getAddExpr(LIOps); 1547 1548 // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition 1549 // is not associative so this isn't necessarily safe. 1550 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); 1551 1552 // If all of the other operands were loop invariant, we are done. 1553 if (Ops.size() == 1) return NewRec; 1554 1555 // Otherwise, add the folded AddRec by the non-liv parts. 1556 for (unsigned i = 0;; ++i) 1557 if (Ops[i] == AddRec) { 1558 Ops[i] = NewRec; 1559 break; 1560 } 1561 return getAddExpr(Ops); 1562 } 1563 1564 // Okay, if there weren't any loop invariants to be folded, check to see if 1565 // there are multiple AddRec's with the same loop induction variable being 1566 // added together. If so, we can fold them. 1567 for (unsigned OtherIdx = Idx+1; 1568 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx) 1569 if (OtherIdx != Idx) { 1570 const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 1571 if (AddRec->getLoop() == OtherAddRec->getLoop()) { 1572 // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D} 1573 SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(), 1574 AddRec->op_end()); 1575 for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) { 1576 if (i >= NewOps.size()) { 1577 NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i, 1578 OtherAddRec->op_end()); 1579 break; 1580 } 1581 NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i)); 1582 } 1583 const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop()); 1584 1585 if (Ops.size() == 2) return NewAddRec; 1586 1587 Ops.erase(Ops.begin()+Idx); 1588 Ops.erase(Ops.begin()+OtherIdx-1); 1589 Ops.push_back(NewAddRec); 1590 return getAddExpr(Ops); 1591 } 1592 } 1593 1594 // Otherwise couldn't fold anything into this recurrence. Move onto the 1595 // next one. 1596 } 1597 1598 // Okay, it looks like we really DO need an add expr. Check to see if we 1599 // already have one, otherwise create a new one. 1600 FoldingSetNodeID ID; 1601 ID.AddInteger(scAddExpr); 1602 ID.AddInteger(Ops.size()); 1603 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1604 ID.AddPointer(Ops[i]); 1605 void *IP = 0; 1606 SCEVAddExpr *S = 1607 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1608 if (!S) { 1609 S = SCEVAllocator.Allocate<SCEVAddExpr>(); 1610 new (S) SCEVAddExpr(ID, Ops); 1611 UniqueSCEVs.InsertNode(S, IP); 1612 } 1613 if (HasNUW) S->setHasNoUnsignedWrap(true); 1614 if (HasNSW) S->setHasNoSignedWrap(true); 1615 return S; 1616 } 1617 1618 /// getMulExpr - Get a canonical multiply expression, or something simpler if 1619 /// possible. 
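/// For example, constant operands 2 and 3 are folded into the single
/// constant 6, and a multiplication by the constant 1 is dropped entirely.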
1620 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, 1621 bool HasNUW, bool HasNSW) { 1622 assert(!Ops.empty() && "Cannot get empty mul!"); 1623 if (Ops.size() == 1) return Ops[0]; 1624 #ifndef NDEBUG 1625 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 1626 assert(getEffectiveSCEVType(Ops[i]->getType()) == 1627 getEffectiveSCEVType(Ops[0]->getType()) && 1628 "SCEVMulExpr operand types don't match!"); 1629 #endif 1630 1631 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 1632 if (!HasNUW && HasNSW) { 1633 bool All = true; 1634 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1635 if (!isKnownNonNegative(Ops[i])) { 1636 All = false; 1637 break; 1638 } 1639 if (All) HasNUW = true; 1640 } 1641 1642 // Sort by complexity, this groups all similar expression types together. 1643 GroupByComplexity(Ops, LI); 1644 1645 // If there are any constants, fold them together. 1646 unsigned Idx = 0; 1647 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 1648 1649 // C1*(C2+V) -> C1*C2 + C1*V 1650 if (Ops.size() == 2) 1651 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) 1652 if (Add->getNumOperands() == 2 && 1653 isa<SCEVConstant>(Add->getOperand(0))) 1654 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)), 1655 getMulExpr(LHSC, Add->getOperand(1))); 1656 1657 ++Idx; 1658 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 1659 // We found two constants, fold them together! 1660 ConstantInt *Fold = ConstantInt::get(getContext(), 1661 LHSC->getValue()->getValue() * 1662 RHSC->getValue()->getValue()); 1663 Ops[0] = getConstant(Fold); 1664 Ops.erase(Ops.begin()+1); // Erase the folded element 1665 if (Ops.size() == 1) return Ops[0]; 1666 LHSC = cast<SCEVConstant>(Ops[0]); 1667 } 1668 1669 // If we are left with a constant one being multiplied, strip it off. 1670 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) { 1671 Ops.erase(Ops.begin()); 1672 --Idx; 1673 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) { 1674 // If we have a multiply of zero, it will always be zero. 1675 return Ops[0]; 1676 } else if (Ops[0]->isAllOnesValue()) { 1677 // If we have a mul by -1 of an add, try distributing the -1 among the 1678 // add operands. 1679 if (Ops.size() == 2) 1680 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { 1681 SmallVector<const SCEV *, 4> NewOps; 1682 bool AnyFolded = false; 1683 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 1684 I != E; ++I) { 1685 const SCEV *Mul = getMulExpr(Ops[0], *I); 1686 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true; 1687 NewOps.push_back(Mul); 1688 } 1689 if (AnyFolded) 1690 return getAddExpr(NewOps); 1691 } 1692 } 1693 } 1694 1695 // Skip over the add expression until we get to a multiply. 1696 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr) 1697 ++Idx; 1698 1699 if (Ops.size() == 1) 1700 return Ops[0]; 1701 1702 // If there are mul operands inline them all into this expression. 1703 if (Idx < Ops.size()) { 1704 bool DeletedMul = false; 1705 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { 1706 // If we have an mul, expand the mul operands onto the end of the operands 1707 // list. 1708 Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end()); 1709 Ops.erase(Ops.begin()+Idx); 1710 DeletedMul = true; 1711 } 1712 1713 // If we deleted at least one mul, we added operands to the end of the list, 1714 // and they are not necessarily sorted. 
Recurse to resort and resimplify 1715 // any operands we just aquired. 1716 if (DeletedMul) 1717 return getMulExpr(Ops); 1718 } 1719 1720 // If there are any add recurrences in the operands list, see if any other 1721 // added values are loop invariant. If so, we can fold them into the 1722 // recurrence. 1723 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr) 1724 ++Idx; 1725 1726 // Scan over all recurrences, trying to fold loop invariants into them. 1727 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) { 1728 // Scan all of the other operands to this mul and add them to the vector if 1729 // they are loop invariant w.r.t. the recurrence. 1730 SmallVector<const SCEV *, 8> LIOps; 1731 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); 1732 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1733 if (Ops[i]->isLoopInvariant(AddRec->getLoop())) { 1734 LIOps.push_back(Ops[i]); 1735 Ops.erase(Ops.begin()+i); 1736 --i; --e; 1737 } 1738 1739 // If we found some loop invariants, fold them into the recurrence. 1740 if (!LIOps.empty()) { 1741 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step} 1742 SmallVector<const SCEV *, 4> NewOps; 1743 NewOps.reserve(AddRec->getNumOperands()); 1744 if (LIOps.size() == 1) { 1745 const SCEV *Scale = LIOps[0]; 1746 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) 1747 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i))); 1748 } else { 1749 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 1750 SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end()); 1751 MulOps.push_back(AddRec->getOperand(i)); 1752 NewOps.push_back(getMulExpr(MulOps)); 1753 } 1754 } 1755 1756 // It's tempting to propagate the NSW flag here, but nsw multiplication 1757 // is not associative so this isn't necessarily safe. 1758 const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(), 1759 HasNUW && AddRec->hasNoUnsignedWrap(), 1760 /*HasNSW=*/false); 1761 1762 // If all of the other operands were loop invariant, we are done. 1763 if (Ops.size() == 1) return NewRec; 1764 1765 // Otherwise, multiply the folded AddRec by the non-liv parts. 1766 for (unsigned i = 0;; ++i) 1767 if (Ops[i] == AddRec) { 1768 Ops[i] = NewRec; 1769 break; 1770 } 1771 return getMulExpr(Ops); 1772 } 1773 1774 // Okay, if there weren't any loop invariants to be folded, check to see if 1775 // there are multiple AddRec's with the same loop induction variable being 1776 // multiplied together. If so, we can fold them. 1777 for (unsigned OtherIdx = Idx+1; 1778 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx) 1779 if (OtherIdx != Idx) { 1780 const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); 1781 if (AddRec->getLoop() == OtherAddRec->getLoop()) { 1782 // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D} 1783 const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec; 1784 const SCEV *NewStart = getMulExpr(F->getStart(), 1785 G->getStart()); 1786 const SCEV *B = F->getStepRecurrence(*this); 1787 const SCEV *D = G->getStepRecurrence(*this); 1788 const SCEV *NewStep = getAddExpr(getMulExpr(F, D), 1789 getMulExpr(G, B), 1790 getMulExpr(B, D)); 1791 const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep, 1792 F->getLoop()); 1793 if (Ops.size() == 2) return NewAddRec; 1794 1795 Ops.erase(Ops.begin()+Idx); 1796 Ops.erase(Ops.begin()+OtherIdx-1); 1797 Ops.push_back(NewAddRec); 1798 return getMulExpr(Ops); 1799 } 1800 } 1801 1802 // Otherwise couldn't fold anything into this recurrence. 
Move onto the 1803 // next one. 1804 } 1805 1806 // Okay, it looks like we really DO need an mul expr. Check to see if we 1807 // already have one, otherwise create a new one. 1808 FoldingSetNodeID ID; 1809 ID.AddInteger(scMulExpr); 1810 ID.AddInteger(Ops.size()); 1811 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 1812 ID.AddPointer(Ops[i]); 1813 void *IP = 0; 1814 SCEVMulExpr *S = 1815 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 1816 if (!S) { 1817 S = SCEVAllocator.Allocate<SCEVMulExpr>(); 1818 new (S) SCEVMulExpr(ID, Ops); 1819 UniqueSCEVs.InsertNode(S, IP); 1820 } 1821 if (HasNUW) S->setHasNoUnsignedWrap(true); 1822 if (HasNSW) S->setHasNoSignedWrap(true); 1823 return S; 1824 } 1825 1826 /// getUDivExpr - Get a canonical unsigned division expression, or something 1827 /// simpler if possible. 1828 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS, 1829 const SCEV *RHS) { 1830 assert(getEffectiveSCEVType(LHS->getType()) == 1831 getEffectiveSCEVType(RHS->getType()) && 1832 "SCEVUDivExpr operand types don't match!"); 1833 1834 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { 1835 if (RHSC->getValue()->equalsInt(1)) 1836 return LHS; // X udiv 1 --> x 1837 if (RHSC->isZero()) 1838 return getIntegerSCEV(0, LHS->getType()); // value is undefined 1839 1840 // Determine if the division can be folded into the operands of 1841 // its operands. 1842 // TODO: Generalize this to non-constants by using known-bits information. 1843 const Type *Ty = LHS->getType(); 1844 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros(); 1845 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ; 1846 // For non-power-of-two values, effectively round the value up to the 1847 // nearest power of two. 1848 if (!RHSC->getValue()->getValue().isPowerOf2()) 1849 ++MaxShiftAmt; 1850 const IntegerType *ExtTy = 1851 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); 1852 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. 1853 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) 1854 if (const SCEVConstant *Step = 1855 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) 1856 if (!Step->getValue()->getValue() 1857 .urem(RHSC->getValue()->getValue()) && 1858 getZeroExtendExpr(AR, ExtTy) == 1859 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), 1860 getZeroExtendExpr(Step, ExtTy), 1861 AR->getLoop())) { 1862 SmallVector<const SCEV *, 4> Operands; 1863 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i) 1864 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS)); 1865 return getAddRecExpr(Operands, AR->getLoop()); 1866 } 1867 // (A*B)/C --> A*(B/C) if safe and B/C can be folded. 1868 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { 1869 SmallVector<const SCEV *, 4> Operands; 1870 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) 1871 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy)); 1872 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands)) 1873 // Find an operand that's safely divisible. 
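// For example, when the multiply is known not to overflow (checked above via
// zero-extension), (x * 8) /u 4 folds to x * 2 by dividing the constant
// operand.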
1874 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { 1875 const SCEV *Op = M->getOperand(i); 1876 const SCEV *Div = getUDivExpr(Op, RHSC); 1877 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) { 1878 const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands(); 1879 Operands = SmallVector<const SCEV *, 4>(MOperands.begin(), 1880 MOperands.end()); 1881 Operands[i] = Div; 1882 return getMulExpr(Operands); 1883 } 1884 } 1885 } 1886 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. 1887 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) { 1888 SmallVector<const SCEV *, 4> Operands; 1889 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) 1890 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy)); 1891 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) { 1892 Operands.clear(); 1893 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) { 1894 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS); 1895 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i)) 1896 break; 1897 Operands.push_back(Op); 1898 } 1899 if (Operands.size() == A->getNumOperands()) 1900 return getAddExpr(Operands); 1901 } 1902 } 1903 1904 // Fold if both operands are constant. 1905 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { 1906 Constant *LHSCV = LHSC->getValue(); 1907 Constant *RHSCV = RHSC->getValue(); 1908 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, 1909 RHSCV))); 1910 } 1911 } 1912 1913 FoldingSetNodeID ID; 1914 ID.AddInteger(scUDivExpr); 1915 ID.AddPointer(LHS); 1916 ID.AddPointer(RHS); 1917 void *IP = 0; 1918 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 1919 SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>(); 1920 new (S) SCEVUDivExpr(ID, LHS, RHS); 1921 UniqueSCEVs.InsertNode(S, IP); 1922 return S; 1923 } 1924 1925 1926 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 1927 /// Simplify the expression as much as possible. 1928 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, 1929 const SCEV *Step, const Loop *L, 1930 bool HasNUW, bool HasNSW) { 1931 SmallVector<const SCEV *, 4> Operands; 1932 Operands.push_back(Start); 1933 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) 1934 if (StepChrec->getLoop() == L) { 1935 Operands.insert(Operands.end(), StepChrec->op_begin(), 1936 StepChrec->op_end()); 1937 return getAddRecExpr(Operands, L); 1938 } 1939 1940 Operands.push_back(Step); 1941 return getAddRecExpr(Operands, L, HasNUW, HasNSW); 1942 } 1943 1944 /// getAddRecExpr - Get an add recurrence expression for the specified loop. 1945 /// Simplify the expression as much as possible. 1946 const SCEV * 1947 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, 1948 const Loop *L, 1949 bool HasNUW, bool HasNSW) { 1950 if (Operands.size() == 1) return Operands[0]; 1951 #ifndef NDEBUG 1952 for (unsigned i = 1, e = Operands.size(); i != e; ++i) 1953 assert(getEffectiveSCEVType(Operands[i]->getType()) == 1954 getEffectiveSCEVType(Operands[0]->getType()) && 1955 "SCEVAddRecExpr operand types don't match!"); 1956 #endif 1957 1958 if (Operands.back()->isZero()) { 1959 Operands.pop_back(); 1960 return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X 1961 } 1962 1963 // If HasNSW is true and all the operands are non-negative, infer HasNUW. 
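// (For non-negative values the signed and unsigned interpretations coincide,
// so if the signed additions cannot wrap, the unsigned ones cannot either.)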
1964 if (!HasNUW && HasNSW) { 1965 bool All = true; 1966 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 1967 if (!isKnownNonNegative(Operands[i])) { 1968 All = false; 1969 break; 1970 } 1971 if (All) HasNUW = true; 1972 } 1973 1974 // Canonicalize nested AddRecs in by nesting them in order of loop depth. 1975 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { 1976 const Loop *NestedLoop = NestedAR->getLoop(); 1977 if (L->contains(NestedLoop->getHeader()) ? 1978 (L->getLoopDepth() < NestedLoop->getLoopDepth()) : 1979 (!NestedLoop->contains(L->getHeader()) && 1980 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { 1981 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), 1982 NestedAR->op_end()); 1983 Operands[0] = NestedAR->getStart(); 1984 // AddRecs require their operands be loop-invariant with respect to their 1985 // loops. Don't perform this transformation if it would break this 1986 // requirement. 1987 bool AllInvariant = true; 1988 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 1989 if (!Operands[i]->isLoopInvariant(L)) { 1990 AllInvariant = false; 1991 break; 1992 } 1993 if (AllInvariant) { 1994 NestedOperands[0] = getAddRecExpr(Operands, L); 1995 AllInvariant = true; 1996 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i) 1997 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) { 1998 AllInvariant = false; 1999 break; 2000 } 2001 if (AllInvariant) 2002 // Ok, both add recurrences are valid after the transformation. 2003 return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW); 2004 } 2005 // Reset Operands to its original state. 2006 Operands[0] = NestedAR; 2007 } 2008 } 2009 2010 // Okay, it looks like we really DO need an addrec expr. Check to see if we 2011 // already have one, otherwise create a new one. 2012 FoldingSetNodeID ID; 2013 ID.AddInteger(scAddRecExpr); 2014 ID.AddInteger(Operands.size()); 2015 for (unsigned i = 0, e = Operands.size(); i != e; ++i) 2016 ID.AddPointer(Operands[i]); 2017 ID.AddPointer(L); 2018 void *IP = 0; 2019 SCEVAddRecExpr *S = 2020 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP)); 2021 if (!S) { 2022 S = SCEVAllocator.Allocate<SCEVAddRecExpr>(); 2023 new (S) SCEVAddRecExpr(ID, Operands, L); 2024 UniqueSCEVs.InsertNode(S, IP); 2025 } 2026 if (HasNUW) S->setHasNoUnsignedWrap(true); 2027 if (HasNSW) S->setHasNoSignedWrap(true); 2028 return S; 2029 } 2030 2031 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, 2032 const SCEV *RHS) { 2033 SmallVector<const SCEV *, 2> Ops; 2034 Ops.push_back(LHS); 2035 Ops.push_back(RHS); 2036 return getSMaxExpr(Ops); 2037 } 2038 2039 const SCEV * 2040 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2041 assert(!Ops.empty() && "Cannot get empty smax!"); 2042 if (Ops.size() == 1) return Ops[0]; 2043 #ifndef NDEBUG 2044 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2045 assert(getEffectiveSCEVType(Ops[i]->getType()) == 2046 getEffectiveSCEVType(Ops[0]->getType()) && 2047 "SCEVSMaxExpr operand types don't match!"); 2048 #endif 2049 2050 // Sort by complexity, this groups all similar expression types together. 2051 GroupByComplexity(Ops, LI); 2052 2053 // If there are any constants, fold them together. 2054 unsigned Idx = 0; 2055 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2056 ++Idx; 2057 assert(Idx < Ops.size()); 2058 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2059 // We found two constants, fold them together! 
2060 ConstantInt *Fold = ConstantInt::get(getContext(), 2061 APIntOps::smax(LHSC->getValue()->getValue(), 2062 RHSC->getValue()->getValue())); 2063 Ops[0] = getConstant(Fold); 2064 Ops.erase(Ops.begin()+1); // Erase the folded element 2065 if (Ops.size() == 1) return Ops[0]; 2066 LHSC = cast<SCEVConstant>(Ops[0]); 2067 } 2068 2069 // If we are left with a constant minimum-int, strip it off. 2070 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) { 2071 Ops.erase(Ops.begin()); 2072 --Idx; 2073 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) { 2074 // If we have an smax with a constant maximum-int, it will always be 2075 // maximum-int. 2076 return Ops[0]; 2077 } 2078 } 2079 2080 if (Ops.size() == 1) return Ops[0]; 2081 2082 // Find the first SMax 2083 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr) 2084 ++Idx; 2085 2086 // Check to see if one of the operands is an SMax. If so, expand its operands 2087 // onto our operand list, and recurse to simplify. 2088 if (Idx < Ops.size()) { 2089 bool DeletedSMax = false; 2090 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { 2091 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end()); 2092 Ops.erase(Ops.begin()+Idx); 2093 DeletedSMax = true; 2094 } 2095 2096 if (DeletedSMax) 2097 return getSMaxExpr(Ops); 2098 } 2099 2100 // Okay, check to see if the same value occurs in the operand list twice. If 2101 // so, delete one. Since we sorted the list, these values are required to 2102 // be adjacent. 2103 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2104 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y 2105 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2106 --i; --e; 2107 } 2108 2109 if (Ops.size() == 1) return Ops[0]; 2110 2111 assert(!Ops.empty() && "Reduced smax down to nothing!"); 2112 2113 // Okay, it looks like we really DO need an smax expr. Check to see if we 2114 // already have one, otherwise create a new one. 2115 FoldingSetNodeID ID; 2116 ID.AddInteger(scSMaxExpr); 2117 ID.AddInteger(Ops.size()); 2118 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2119 ID.AddPointer(Ops[i]); 2120 void *IP = 0; 2121 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2122 SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>(); 2123 new (S) SCEVSMaxExpr(ID, Ops); 2124 UniqueSCEVs.InsertNode(S, IP); 2125 return S; 2126 } 2127 2128 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, 2129 const SCEV *RHS) { 2130 SmallVector<const SCEV *, 2> Ops; 2131 Ops.push_back(LHS); 2132 Ops.push_back(RHS); 2133 return getUMaxExpr(Ops); 2134 } 2135 2136 const SCEV * 2137 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) { 2138 assert(!Ops.empty() && "Cannot get empty umax!"); 2139 if (Ops.size() == 1) return Ops[0]; 2140 #ifndef NDEBUG 2141 for (unsigned i = 1, e = Ops.size(); i != e; ++i) 2142 assert(getEffectiveSCEVType(Ops[i]->getType()) == 2143 getEffectiveSCEVType(Ops[0]->getType()) && 2144 "SCEVUMaxExpr operand types don't match!"); 2145 #endif 2146 2147 // Sort by complexity, this groups all similar expression types together. 2148 GroupByComplexity(Ops, LI); 2149 2150 // If there are any constants, fold them together. 2151 unsigned Idx = 0; 2152 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { 2153 ++Idx; 2154 assert(Idx < Ops.size()); 2155 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { 2156 // We found two constants, fold them together! 
2157 ConstantInt *Fold = ConstantInt::get(getContext(), 2158 APIntOps::umax(LHSC->getValue()->getValue(), 2159 RHSC->getValue()->getValue())); 2160 Ops[0] = getConstant(Fold); 2161 Ops.erase(Ops.begin()+1); // Erase the folded element 2162 if (Ops.size() == 1) return Ops[0]; 2163 LHSC = cast<SCEVConstant>(Ops[0]); 2164 } 2165 2166 // If we are left with a constant minimum-int, strip it off. 2167 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) { 2168 Ops.erase(Ops.begin()); 2169 --Idx; 2170 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) { 2171 // If we have an umax with a constant maximum-int, it will always be 2172 // maximum-int. 2173 return Ops[0]; 2174 } 2175 } 2176 2177 if (Ops.size() == 1) return Ops[0]; 2178 2179 // Find the first UMax 2180 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr) 2181 ++Idx; 2182 2183 // Check to see if one of the operands is a UMax. If so, expand its operands 2184 // onto our operand list, and recurse to simplify. 2185 if (Idx < Ops.size()) { 2186 bool DeletedUMax = false; 2187 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { 2188 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end()); 2189 Ops.erase(Ops.begin()+Idx); 2190 DeletedUMax = true; 2191 } 2192 2193 if (DeletedUMax) 2194 return getUMaxExpr(Ops); 2195 } 2196 2197 // Okay, check to see if the same value occurs in the operand list twice. If 2198 // so, delete one. Since we sorted the list, these values are required to 2199 // be adjacent. 2200 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i) 2201 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y 2202 Ops.erase(Ops.begin()+i, Ops.begin()+i+1); 2203 --i; --e; 2204 } 2205 2206 if (Ops.size() == 1) return Ops[0]; 2207 2208 assert(!Ops.empty() && "Reduced umax down to nothing!"); 2209 2210 // Okay, it looks like we really DO need a umax expr. Check to see if we 2211 // already have one, otherwise create a new one. 2212 FoldingSetNodeID ID; 2213 ID.AddInteger(scUMaxExpr); 2214 ID.AddInteger(Ops.size()); 2215 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2216 ID.AddPointer(Ops[i]); 2217 void *IP = 0; 2218 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2219 SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>(); 2220 new (S) SCEVUMaxExpr(ID, Ops); 2221 UniqueSCEVs.InsertNode(S, IP); 2222 return S; 2223 } 2224 2225 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS, 2226 const SCEV *RHS) { 2227 // ~smax(~x, ~y) == smin(x, y). 
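// Bitwise-not reverses the signed ordering (~x == -1 - x), so complementing
// the operands, taking smax, and complementing the result yields smin.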
2228 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2229 } 2230 2231 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, 2232 const SCEV *RHS) { 2233 // ~umax(~x, ~y) == umin(x, y) 2234 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS))); 2235 } 2236 2237 const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) { 2238 Constant *C = ConstantExpr::getSizeOf(AllocTy); 2239 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2240 C = ConstantFoldConstantExpression(CE, TD); 2241 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2242 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2243 } 2244 2245 const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) { 2246 Constant *C = ConstantExpr::getAlignOf(AllocTy); 2247 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2248 C = ConstantFoldConstantExpression(CE, TD); 2249 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy)); 2250 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2251 } 2252 2253 const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy, 2254 unsigned FieldNo) { 2255 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo); 2256 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2257 C = ConstantFoldConstantExpression(CE, TD); 2258 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy)); 2259 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2260 } 2261 2262 const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy, 2263 Constant *FieldNo) { 2264 Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo); 2265 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) 2266 C = ConstantFoldConstantExpression(CE, TD); 2267 const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy)); 2268 return getTruncateOrZeroExtend(getSCEV(C), Ty); 2269 } 2270 2271 const SCEV *ScalarEvolution::getUnknown(Value *V) { 2272 // Don't attempt to do anything other than create a SCEVUnknown object 2273 // here. createSCEV only calls getUnknown after checking for all other 2274 // interesting possibilities, and any other code that calls getUnknown 2275 // is doing so in order to hide a value from SCEV canonicalization. 2276 2277 FoldingSetNodeID ID; 2278 ID.AddInteger(scUnknown); 2279 ID.AddPointer(V); 2280 void *IP = 0; 2281 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; 2282 SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>(); 2283 new (S) SCEVUnknown(ID, V); 2284 UniqueSCEVs.InsertNode(S, IP); 2285 return S; 2286 } 2287 2288 //===----------------------------------------------------------------------===// 2289 // Basic SCEV Analysis and PHI Idiom Recognition Code 2290 // 2291 2292 /// isSCEVable - Test if values of the given type are analyzable within 2293 /// the SCEV framework. This primarily includes integer types, and it 2294 /// can optionally include pointer types if the ScalarEvolution class 2295 /// has access to target-specific information. 2296 bool ScalarEvolution::isSCEVable(const Type *Ty) const { 2297 // Integers and pointers are always SCEVable. 2298 return Ty->isInteger() || isa<PointerType>(Ty); 2299 } 2300 2301 /// getTypeSizeInBits - Return the size in bits of the specified type, 2302 /// for which isSCEVable must return true. 2303 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const { 2304 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2305 2306 // If we have a TargetData, use it! 2307 if (TD) 2308 return TD->getTypeSizeInBits(Ty); 2309 2310 // Integer types have fixed sizes. 
2311 if (Ty->isInteger()) 2312 return Ty->getPrimitiveSizeInBits(); 2313 2314 // The only other support type is pointer. Without TargetData, conservatively 2315 // assume pointers are 64-bit. 2316 assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!"); 2317 return 64; 2318 } 2319 2320 /// getEffectiveSCEVType - Return a type with the same bitwidth as 2321 /// the given type and which represents how SCEV will treat the given 2322 /// type, for which isSCEVable must return true. For pointer types, 2323 /// this is the pointer-sized integer type. 2324 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const { 2325 assert(isSCEVable(Ty) && "Type is not SCEVable!"); 2326 2327 if (Ty->isInteger()) 2328 return Ty; 2329 2330 // The only other support type is pointer. 2331 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!"); 2332 if (TD) return TD->getIntPtrType(getContext()); 2333 2334 // Without TargetData, conservatively assume pointers are 64-bit. 2335 return Type::getInt64Ty(getContext()); 2336 } 2337 2338 const SCEV *ScalarEvolution::getCouldNotCompute() { 2339 return &CouldNotCompute; 2340 } 2341 2342 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the 2343 /// expression and create a new one. 2344 const SCEV *ScalarEvolution::getSCEV(Value *V) { 2345 assert(isSCEVable(V->getType()) && "Value is not SCEVable!"); 2346 2347 std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V); 2348 if (I != Scalars.end()) return I->second; 2349 const SCEV *S = createSCEV(V); 2350 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S)); 2351 return S; 2352 } 2353 2354 /// getIntegerSCEV - Given a SCEVable type, create a constant for the 2355 /// specified signed integer value and return a SCEV for the constant. 2356 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) { 2357 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); 2358 return getConstant(ConstantInt::get(ITy, Val)); 2359 } 2360 2361 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V 2362 /// 2363 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) { 2364 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2365 return getConstant( 2366 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); 2367 2368 const Type *Ty = V->getType(); 2369 Ty = getEffectiveSCEVType(Ty); 2370 return getMulExpr(V, 2371 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)))); 2372 } 2373 2374 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V 2375 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { 2376 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) 2377 return getConstant( 2378 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); 2379 2380 const Type *Ty = V->getType(); 2381 Ty = getEffectiveSCEVType(Ty); 2382 const SCEV *AllOnes = 2383 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))); 2384 return getMinusSCEV(AllOnes, V); 2385 } 2386 2387 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS. 2388 /// 2389 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, 2390 const SCEV *RHS) { 2391 // X - Y --> X + -Y 2392 return getAddExpr(LHS, getNegativeSCEV(RHS)); 2393 } 2394 2395 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the 2396 /// input value to the specified type. If the type must be extended, it is zero 2397 /// extended. 
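/// For example, an i64 value converted to i32 is truncated, an i32 value
/// converted to i64 is zero extended, and a conversion between types of
/// equal width returns the input unchanged.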
2398 const SCEV * 2399 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, 2400 const Type *Ty) { 2401 const Type *SrcTy = V->getType(); 2402 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2403 (Ty->isInteger() || isa<PointerType>(Ty)) && 2404 "Cannot truncate or zero extend with non-integer arguments!"); 2405 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2406 return V; // No conversion 2407 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2408 return getTruncateExpr(V, Ty); 2409 return getZeroExtendExpr(V, Ty); 2410 } 2411 2412 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the 2413 /// input value to the specified type. If the type must be extended, it is sign 2414 /// extended. 2415 const SCEV * 2416 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, 2417 const Type *Ty) { 2418 const Type *SrcTy = V->getType(); 2419 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2420 (Ty->isInteger() || isa<PointerType>(Ty)) && 2421 "Cannot truncate or zero extend with non-integer arguments!"); 2422 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2423 return V; // No conversion 2424 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty)) 2425 return getTruncateExpr(V, Ty); 2426 return getSignExtendExpr(V, Ty); 2427 } 2428 2429 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the 2430 /// input value to the specified type. If the type must be extended, it is zero 2431 /// extended. The conversion must not be narrowing. 2432 const SCEV * 2433 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) { 2434 const Type *SrcTy = V->getType(); 2435 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2436 (Ty->isInteger() || isa<PointerType>(Ty)) && 2437 "Cannot noop or zero extend with non-integer arguments!"); 2438 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2439 "getNoopOrZeroExtend cannot truncate!"); 2440 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2441 return V; // No conversion 2442 return getZeroExtendExpr(V, Ty); 2443 } 2444 2445 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the 2446 /// input value to the specified type. If the type must be extended, it is sign 2447 /// extended. The conversion must not be narrowing. 2448 const SCEV * 2449 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) { 2450 const Type *SrcTy = V->getType(); 2451 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2452 (Ty->isInteger() || isa<PointerType>(Ty)) && 2453 "Cannot noop or sign extend with non-integer arguments!"); 2454 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2455 "getNoopOrSignExtend cannot truncate!"); 2456 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2457 return V; // No conversion 2458 return getSignExtendExpr(V, Ty); 2459 } 2460 2461 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of 2462 /// the input value to the specified type. If the type must be extended, 2463 /// it is extended with unspecified bits. The conversion must not be 2464 /// narrowing. 
2465 const SCEV * 2466 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) { 2467 const Type *SrcTy = V->getType(); 2468 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2469 (Ty->isInteger() || isa<PointerType>(Ty)) && 2470 "Cannot noop or any extend with non-integer arguments!"); 2471 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) && 2472 "getNoopOrAnyExtend cannot truncate!"); 2473 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2474 return V; // No conversion 2475 return getAnyExtendExpr(V, Ty); 2476 } 2477 2478 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the 2479 /// input value to the specified type. The conversion must not be widening. 2480 const SCEV * 2481 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) { 2482 const Type *SrcTy = V->getType(); 2483 assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) && 2484 (Ty->isInteger() || isa<PointerType>(Ty)) && 2485 "Cannot truncate or noop with non-integer arguments!"); 2486 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) && 2487 "getTruncateOrNoop cannot extend!"); 2488 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty)) 2489 return V; // No conversion 2490 return getTruncateExpr(V, Ty); 2491 } 2492 2493 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of 2494 /// the types using zero-extension, and then perform a umax operation 2495 /// with them. 2496 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS, 2497 const SCEV *RHS) { 2498 const SCEV *PromotedLHS = LHS; 2499 const SCEV *PromotedRHS = RHS; 2500 2501 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2502 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2503 else 2504 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2505 2506 return getUMaxExpr(PromotedLHS, PromotedRHS); 2507 } 2508 2509 /// getUMinFromMismatchedTypes - Promote the operands to the wider of 2510 /// the types using zero-extension, and then perform a umin operation 2511 /// with them. 2512 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS, 2513 const SCEV *RHS) { 2514 const SCEV *PromotedLHS = LHS; 2515 const SCEV *PromotedRHS = RHS; 2516 2517 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType())) 2518 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType()); 2519 else 2520 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType()); 2521 2522 return getUMinExpr(PromotedLHS, PromotedRHS); 2523 } 2524 2525 /// PushDefUseChildren - Push users of the given Instruction 2526 /// onto the given Worklist. 2527 static void 2528 PushDefUseChildren(Instruction *I, 2529 SmallVectorImpl<Instruction *> &Worklist) { 2530 // Push the def-use children onto the Worklist stack. 2531 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); 2532 UI != UE; ++UI) 2533 Worklist.push_back(cast<Instruction>(UI)); 2534 } 2535 2536 /// ForgetSymbolicValue - This looks up computed SCEV values for all 2537 /// instructions that depend on the given instruction and removes them from 2538 /// the Scalars map if they reference SymName. This is used during PHI 2539 /// resolution. 
2540 void 2541 ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) { 2542 SmallVector<Instruction *, 16> Worklist; 2543 PushDefUseChildren(I, Worklist); 2544 2545 SmallPtrSet<Instruction *, 8> Visited; 2546 Visited.insert(I); 2547 while (!Worklist.empty()) { 2548 Instruction *I = Worklist.pop_back_val(); 2549 if (!Visited.insert(I)) continue; 2550 2551 std::map<SCEVCallbackVH, const SCEV *>::iterator It = 2552 Scalars.find(static_cast<Value *>(I)); 2553 if (It != Scalars.end()) { 2554 // Short-circuit the def-use traversal if the symbolic name 2555 // ceases to appear in expressions. 2556 if (!It->second->hasOperand(SymName)) 2557 continue; 2558 2559 // SCEVUnknown for a PHI either means that it has an unrecognized 2560 // structure, or it's a PHI that's in the progress of being computed 2561 // by createNodeForPHI. In the former case, additional loop trip 2562 // count information isn't going to change anything. In the later 2563 // case, createNodeForPHI will perform the necessary updates on its 2564 // own when it gets to that point. 2565 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) { 2566 ValuesAtScopes.erase(It->second); 2567 Scalars.erase(It); 2568 } 2569 } 2570 2571 PushDefUseChildren(I, Worklist); 2572 } 2573 } 2574 2575 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in 2576 /// a loop header, making it a potential recurrence, or it doesn't. 2577 /// 2578 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { 2579 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized. 2580 if (const Loop *L = LI->getLoopFor(PN->getParent())) 2581 if (L->getHeader() == PN->getParent()) { 2582 // If it lives in the loop header, it has two incoming values, one 2583 // from outside the loop, and one from inside. 2584 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0)); 2585 unsigned BackEdge = IncomingEdge^1; 2586 2587 // While we are analyzing this PHI node, handle its value symbolically. 2588 const SCEV *SymbolicName = getUnknown(PN); 2589 assert(Scalars.find(PN) == Scalars.end() && 2590 "PHI node already processed?"); 2591 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName)); 2592 2593 // Using this symbolic name for the PHI, analyze the value coming around 2594 // the back-edge. 2595 Value *BEValueV = PN->getIncomingValue(BackEdge); 2596 const SCEV *BEValue = getSCEV(BEValueV); 2597 2598 // NOTE: If BEValue is loop invariant, we know that the PHI node just 2599 // has a special value for the first iteration of the loop. 2600 2601 // If the value coming around the backedge is an add with the symbolic 2602 // value we just inserted, then we found a simple induction variable! 2603 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { 2604 // If there is a single occurrence of the symbolic value, replace it 2605 // with a recurrence. 2606 unsigned FoundIndex = Add->getNumOperands(); 2607 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2608 if (Add->getOperand(i) == SymbolicName) 2609 if (FoundIndex == e) { 2610 FoundIndex = i; 2611 break; 2612 } 2613 2614 if (FoundIndex != Add->getNumOperands()) { 2615 // Create an add with everything but the specified operand. 
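// The remaining sum is the amount added on each trip around the loop, i.e.
// the step (Accum) of the candidate recurrence.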
2616 SmallVector<const SCEV *, 8> Ops; 2617 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i) 2618 if (i != FoundIndex) 2619 Ops.push_back(Add->getOperand(i)); 2620 const SCEV *Accum = getAddExpr(Ops); 2621 2622 // This is not a valid addrec if the step amount is varying each 2623 // loop iteration, but is not itself an addrec in this loop. 2624 if (Accum->isLoopInvariant(L) || 2625 (isa<SCEVAddRecExpr>(Accum) && 2626 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) { 2627 bool HasNUW = false; 2628 bool HasNSW = false; 2629 2630 // If the increment doesn't overflow, then neither the addrec nor 2631 // the post-increment will overflow. 2632 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) { 2633 if (OBO->hasNoUnsignedWrap()) 2634 HasNUW = true; 2635 if (OBO->hasNoSignedWrap()) 2636 HasNSW = true; 2637 } 2638 2639 const SCEV *StartVal = 2640 getSCEV(PN->getIncomingValue(IncomingEdge)); 2641 const SCEV *PHISCEV = 2642 getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW); 2643 2644 // Since the no-wrap flags are on the increment, they apply to the 2645 // post-incremented value as well. 2646 if (Accum->isLoopInvariant(L)) 2647 (void)getAddRecExpr(getAddExpr(StartVal, Accum), 2648 Accum, L, HasNUW, HasNSW); 2649 2650 // Okay, for the entire analysis of this edge we assumed the PHI 2651 // to be symbolic. We now need to go back and purge all of the 2652 // entries for the scalars that use the symbolic expression. 2653 ForgetSymbolicName(PN, SymbolicName); 2654 Scalars[SCEVCallbackVH(PN, this)] = PHISCEV; 2655 return PHISCEV; 2656 } 2657 } 2658 } else if (const SCEVAddRecExpr *AddRec = 2659 dyn_cast<SCEVAddRecExpr>(BEValue)) { 2660 // Otherwise, this could be a loop like this: 2661 // i = 0; for (j = 1; ..; ++j) { .... i = j; } 2662 // In this case, j = {1,+,1} and BEValue is j. 2663 // Because the other in-value of i (0) fits the evolution of BEValue 2664 // i really is an addrec evolution. 2665 if (AddRec->getLoop() == L && AddRec->isAffine()) { 2666 const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge)); 2667 2668 // If StartVal = j.start - j.stride, we can use StartVal as the 2669 // initial step of the addrec evolution. 2670 if (StartVal == getMinusSCEV(AddRec->getOperand(0), 2671 AddRec->getOperand(1))) { 2672 const SCEV *PHISCEV = 2673 getAddRecExpr(StartVal, AddRec->getOperand(1), L); 2674 2675 // Okay, for the entire analysis of this edge we assumed the PHI 2676 // to be symbolic. We now need to go back and purge all of the 2677 // entries for the scalars that use the symbolic expression. 2678 ForgetSymbolicName(PN, SymbolicName); 2679 Scalars[SCEVCallbackVH(PN, this)] = PHISCEV; 2680 return PHISCEV; 2681 } 2682 } 2683 } 2684 2685 return SymbolicName; 2686 } 2687 2688 // It's tempting to recognize PHIs with a unique incoming value, however 2689 // this leads passes like indvars to break LCSSA form. Fortunately, such 2690 // PHIs are rare, as instcombine zaps them. 2691 2692 // If it's not a loop phi, we can't handle it yet. 2693 return getUnknown(PN); 2694 } 2695 2696 /// createNodeForGEP - Expand GEP instructions into add and multiply 2697 /// operations. This allows them to be analyzed by regular SCEV code. 2698 /// 2699 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { 2700 2701 bool InBounds = GEP->isInBounds(); 2702 const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); 2703 Value *Base = GEP->getOperand(0); 2704 // Don't attempt to analyze GEPs over unsized objects. 
2705 if (!cast<PointerType>(Base->getType())->getElementType()->isSized()) 2706 return getUnknown(GEP); 2707 const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy); 2708 gep_type_iterator GTI = gep_type_begin(GEP); 2709 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()), 2710 E = GEP->op_end(); 2711 I != E; ++I) { 2712 Value *Index = *I; 2713 // Compute the (potentially symbolic) offset in bytes for this index. 2714 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) { 2715 // For a struct, add the member offset. 2716 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 2717 TotalOffset = getAddExpr(TotalOffset, 2718 getOffsetOfExpr(STy, FieldNo), 2719 /*HasNUW=*/false, /*HasNSW=*/InBounds); 2720 } else { 2721 // For an array, add the element offset, explicitly scaled. 2722 const SCEV *LocalOffset = getSCEV(Index); 2723 if (!isa<PointerType>(LocalOffset->getType())) 2724 // Getelementptr indicies are signed. 2725 LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy); 2726 // Lower "inbounds" GEPs to NSW arithmetic. 2727 LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI), 2728 /*HasNUW=*/false, /*HasNSW=*/InBounds); 2729 TotalOffset = getAddExpr(TotalOffset, LocalOffset, 2730 /*HasNUW=*/false, /*HasNSW=*/InBounds); 2731 } 2732 } 2733 return getAddExpr(getSCEV(Base), TotalOffset, 2734 /*HasNUW=*/false, /*HasNSW=*/InBounds); 2735 } 2736 2737 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is 2738 /// guaranteed to end in (at every loop iteration). It is, at the same time, 2739 /// the minimum number of times S is divisible by 2. For example, given {4,+,8} 2740 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S. 2741 uint32_t 2742 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { 2743 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2744 return C->getValue()->getValue().countTrailingZeros(); 2745 2746 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) 2747 return std::min(GetMinTrailingZeros(T->getOperand()), 2748 (uint32_t)getTypeSizeInBits(T->getType())); 2749 2750 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { 2751 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2752 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2753 getTypeSizeInBits(E->getType()) : OpRes; 2754 } 2755 2756 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { 2757 uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); 2758 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? 2759 getTypeSizeInBits(E->getType()) : OpRes; 2760 } 2761 2762 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { 2763 // The result is the min of all operands results. 2764 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2765 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2766 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2767 return MinOpRes; 2768 } 2769 2770 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { 2771 // The result is the sum of all operands results. 
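// Trailing zero counts add under multiplication: e.g. 4 (two trailing zeros)
// times 8 (three trailing zeros) is 32, which ends in five zero bits. The
// loop below caps the running sum at the bit width.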
2772 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); 2773 uint32_t BitWidth = getTypeSizeInBits(M->getType()); 2774 for (unsigned i = 1, e = M->getNumOperands(); 2775 SumOpRes != BitWidth && i != e; ++i) 2776 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), 2777 BitWidth); 2778 return SumOpRes; 2779 } 2780 2781 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { 2782 // The result is the min of all operands results. 2783 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); 2784 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) 2785 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i))); 2786 return MinOpRes; 2787 } 2788 2789 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { 2790 // The result is the min of all operands results. 2791 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2792 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2793 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2794 return MinOpRes; 2795 } 2796 2797 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { 2798 // The result is the min of all operands results. 2799 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); 2800 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) 2801 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i))); 2802 return MinOpRes; 2803 } 2804 2805 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2806 // For a SCEVUnknown, ask ValueTracking. 2807 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2808 APInt Mask = APInt::getAllOnesValue(BitWidth); 2809 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2810 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones); 2811 return Zeros.countTrailingOnes(); 2812 } 2813 2814 // SCEVUDivExpr 2815 return 0; 2816 } 2817 2818 /// getUnsignedRange - Determine the unsigned range for a particular SCEV. 2819 /// 2820 ConstantRange 2821 ScalarEvolution::getUnsignedRange(const SCEV *S) { 2822 2823 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2824 return ConstantRange(C->getValue()->getValue()); 2825 2826 unsigned BitWidth = getTypeSizeInBits(S->getType()); 2827 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 2828 2829 // If the value has known zeros, the maximum unsigned value will have those 2830 // known zeros as well. 
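// For example, a value known to end in two zero bits is a multiple of 4, so
// its largest possible value is the greatest multiple of 4 in the bit width.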
2831 uint32_t TZ = GetMinTrailingZeros(S); 2832 if (TZ != 0) 2833 ConservativeResult = 2834 ConstantRange(APInt::getMinValue(BitWidth), 2835 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1); 2836 2837 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2838 ConstantRange X = getUnsignedRange(Add->getOperand(0)); 2839 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 2840 X = X.add(getUnsignedRange(Add->getOperand(i))); 2841 return ConservativeResult.intersectWith(X); 2842 } 2843 2844 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 2845 ConstantRange X = getUnsignedRange(Mul->getOperand(0)); 2846 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 2847 X = X.multiply(getUnsignedRange(Mul->getOperand(i))); 2848 return ConservativeResult.intersectWith(X); 2849 } 2850 2851 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 2852 ConstantRange X = getUnsignedRange(SMax->getOperand(0)); 2853 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 2854 X = X.smax(getUnsignedRange(SMax->getOperand(i))); 2855 return ConservativeResult.intersectWith(X); 2856 } 2857 2858 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 2859 ConstantRange X = getUnsignedRange(UMax->getOperand(0)); 2860 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 2861 X = X.umax(getUnsignedRange(UMax->getOperand(i))); 2862 return ConservativeResult.intersectWith(X); 2863 } 2864 2865 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 2866 ConstantRange X = getUnsignedRange(UDiv->getLHS()); 2867 ConstantRange Y = getUnsignedRange(UDiv->getRHS()); 2868 return ConservativeResult.intersectWith(X.udiv(Y)); 2869 } 2870 2871 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 2872 ConstantRange X = getUnsignedRange(ZExt->getOperand()); 2873 return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); 2874 } 2875 2876 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 2877 ConstantRange X = getUnsignedRange(SExt->getOperand()); 2878 return ConservativeResult.intersectWith(X.signExtend(BitWidth)); 2879 } 2880 2881 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 2882 ConstantRange X = getUnsignedRange(Trunc->getOperand()); 2883 return ConservativeResult.intersectWith(X.truncate(BitWidth)); 2884 } 2885 2886 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 2887 // If there's no unsigned wrap, the value will never be less than its 2888 // initial value. 2889 if (AddRec->hasNoUnsignedWrap()) 2890 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) 2891 ConservativeResult = 2892 ConstantRange(C->getValue()->getValue(), 2893 APInt(getTypeSizeInBits(C->getType()), 0)); 2894 2895 // TODO: non-affine addrec 2896 if (AddRec->isAffine()) { 2897 const Type *Ty = AddRec->getType(); 2898 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 2899 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 2900 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 2901 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 2902 2903 const SCEV *Start = AddRec->getStart(); 2904 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); 2905 2906 // Check for overflow. 
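// If the addrec can wrap in the unsigned sense, the values between Start and
// End do not bound it, so give up and return the conservative range.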
2907 if (!AddRec->hasNoUnsignedWrap()) 2908 return ConservativeResult; 2909 2910 ConstantRange StartRange = getUnsignedRange(Start); 2911 ConstantRange EndRange = getUnsignedRange(End); 2912 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(), 2913 EndRange.getUnsignedMin()); 2914 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(), 2915 EndRange.getUnsignedMax()); 2916 if (Min.isMinValue() && Max.isMaxValue()) 2917 return ConservativeResult; 2918 return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); 2919 } 2920 } 2921 2922 return ConservativeResult; 2923 } 2924 2925 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2926 // For a SCEVUnknown, ask ValueTracking. 2927 unsigned BitWidth = getTypeSizeInBits(U->getType()); 2928 APInt Mask = APInt::getAllOnesValue(BitWidth); 2929 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); 2930 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD); 2931 if (Ones == ~Zeros + 1) 2932 return ConservativeResult; 2933 return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); 2934 } 2935 2936 return ConservativeResult; 2937 } 2938 2939 /// getSignedRange - Determine the signed range for a particular SCEV. 2940 /// 2941 ConstantRange 2942 ScalarEvolution::getSignedRange(const SCEV *S) { 2943 2944 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) 2945 return ConstantRange(C->getValue()->getValue()); 2946 2947 unsigned BitWidth = getTypeSizeInBits(S->getType()); 2948 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true); 2949 2950 // If the value has known zeros, the maximum signed value will have those 2951 // known zeros as well. 2952 uint32_t TZ = GetMinTrailingZeros(S); 2953 if (TZ != 0) 2954 ConservativeResult = 2955 ConstantRange(APInt::getSignedMinValue(BitWidth), 2956 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); 2957 2958 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2959 ConstantRange X = getSignedRange(Add->getOperand(0)); 2960 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) 2961 X = X.add(getSignedRange(Add->getOperand(i))); 2962 return ConservativeResult.intersectWith(X); 2963 } 2964 2965 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 2966 ConstantRange X = getSignedRange(Mul->getOperand(0)); 2967 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) 2968 X = X.multiply(getSignedRange(Mul->getOperand(i))); 2969 return ConservativeResult.intersectWith(X); 2970 } 2971 2972 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { 2973 ConstantRange X = getSignedRange(SMax->getOperand(0)); 2974 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) 2975 X = X.smax(getSignedRange(SMax->getOperand(i))); 2976 return ConservativeResult.intersectWith(X); 2977 } 2978 2979 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { 2980 ConstantRange X = getSignedRange(UMax->getOperand(0)); 2981 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) 2982 X = X.umax(getSignedRange(UMax->getOperand(i))); 2983 return ConservativeResult.intersectWith(X); 2984 } 2985 2986 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { 2987 ConstantRange X = getSignedRange(UDiv->getLHS()); 2988 ConstantRange Y = getSignedRange(UDiv->getRHS()); 2989 return ConservativeResult.intersectWith(X.udiv(Y)); 2990 } 2991 2992 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { 2993 ConstantRange X = getSignedRange(ZExt->getOperand()); 2994 return ConservativeResult.intersectWith(X.zeroExtend(BitWidth)); 2995 } 2996 2997 if (const 
SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { 2998 ConstantRange X = getSignedRange(SExt->getOperand()); 2999 return ConservativeResult.intersectWith(X.signExtend(BitWidth)); 3000 } 3001 3002 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { 3003 ConstantRange X = getSignedRange(Trunc->getOperand()); 3004 return ConservativeResult.intersectWith(X.truncate(BitWidth)); 3005 } 3006 3007 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { 3008 // If there's no signed wrap, and all the operands have the same sign or 3009 // zero, the value won't ever change sign. 3010 if (AddRec->hasNoSignedWrap()) { 3011 bool AllNonNeg = true; 3012 bool AllNonPos = true; 3013 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) { 3014 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false; 3015 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false; 3016 } 3017 if (AllNonNeg) 3018 ConservativeResult = ConservativeResult.intersectWith( 3019 ConstantRange(APInt(BitWidth, 0), 3020 APInt::getSignedMinValue(BitWidth))); 3021 else if (AllNonPos) 3022 ConservativeResult = ConservativeResult.intersectWith( 3023 ConstantRange(APInt::getSignedMinValue(BitWidth), 3024 APInt(BitWidth, 1))); 3025 } 3026 3027 // TODO: non-affine addrec 3028 if (AddRec->isAffine()) { 3029 const Type *Ty = AddRec->getType(); 3030 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop()); 3031 if (!isa<SCEVCouldNotCompute>(MaxBECount) && 3032 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) { 3033 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty); 3034 3035 const SCEV *Start = AddRec->getStart(); 3036 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this); 3037 3038 // Check for overflow. 3039 if (!AddRec->hasNoSignedWrap()) 3040 return ConservativeResult; 3041 3042 ConstantRange StartRange = getSignedRange(Start); 3043 ConstantRange EndRange = getSignedRange(End); 3044 APInt Min = APIntOps::smin(StartRange.getSignedMin(), 3045 EndRange.getSignedMin()); 3046 APInt Max = APIntOps::smax(StartRange.getSignedMax(), 3047 EndRange.getSignedMax()); 3048 if (Min.isMinSignedValue() && Max.isMaxSignedValue()) 3049 return ConservativeResult; 3050 return ConservativeResult.intersectWith(ConstantRange(Min, Max+1)); 3051 } 3052 } 3053 3054 return ConservativeResult; 3055 } 3056 3057 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 3058 // For a SCEVUnknown, ask ValueTracking. 3059 if (!U->getValue()->getType()->isInteger() && !TD) 3060 return ConservativeResult; 3061 unsigned NS = ComputeNumSignBits(U->getValue(), TD); 3062 if (NS == 1) 3063 return ConservativeResult; 3064 return ConservativeResult.intersectWith( 3065 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), 3066 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)); 3067 } 3068 3069 return ConservativeResult; 3070 } 3071 3072 /// createSCEV - We know that there is no SCEV for the specified value. 3073 /// Analyze the expression. 
3074 /// 3075 const SCEV *ScalarEvolution::createSCEV(Value *V) { 3076 if (!isSCEVable(V->getType())) 3077 return getUnknown(V); 3078 3079 unsigned Opcode = Instruction::UserOp1; 3080 if (Instruction *I = dyn_cast<Instruction>(V)) 3081 Opcode = I->getOpcode(); 3082 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) 3083 Opcode = CE->getOpcode(); 3084 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) 3085 return getConstant(CI); 3086 else if (isa<ConstantPointerNull>(V)) 3087 return getIntegerSCEV(0, V->getType()); 3088 else if (isa<UndefValue>(V)) 3089 return getIntegerSCEV(0, V->getType()); 3090 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) 3091 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee()); 3092 else 3093 return getUnknown(V); 3094 3095 Operator *U = cast<Operator>(V); 3096 switch (Opcode) { 3097 case Instruction::Add: 3098 // Don't transfer the NSW and NUW bits from the Add instruction to the 3099 // Add expression, because the Instruction may be guarded by control 3100 // flow and the no-overflow bits may not be valid for the expression in 3101 // any context. 3102 return getAddExpr(getSCEV(U->getOperand(0)), 3103 getSCEV(U->getOperand(1))); 3104 case Instruction::Mul: 3105 // Don't transfer the NSW and NUW bits from the Mul instruction to the 3106 // Mul expression, as with Add. 3107 return getMulExpr(getSCEV(U->getOperand(0)), 3108 getSCEV(U->getOperand(1))); 3109 case Instruction::UDiv: 3110 return getUDivExpr(getSCEV(U->getOperand(0)), 3111 getSCEV(U->getOperand(1))); 3112 case Instruction::Sub: 3113 return getMinusSCEV(getSCEV(U->getOperand(0)), 3114 getSCEV(U->getOperand(1))); 3115 case Instruction::And: 3116 // For an expression like x&255 that merely masks off the high bits, 3117 // use zext(trunc(x)) as the SCEV expression. 3118 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3119 if (CI->isNullValue()) 3120 return getSCEV(U->getOperand(1)); 3121 if (CI->isAllOnesValue()) 3122 return getSCEV(U->getOperand(0)); 3123 const APInt &A = CI->getValue(); 3124 3125 // Instcombine's ShrinkDemandedConstant may strip bits out of 3126 // constants, obscuring what would otherwise be a low-bits mask. 3127 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant 3128 // knew about to reconstruct a low-bits mask value. 3129 unsigned LZ = A.countLeadingZeros(); 3130 unsigned BitWidth = A.getBitWidth(); 3131 APInt AllOnes = APInt::getAllOnesValue(BitWidth); 3132 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 3133 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD); 3134 3135 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ); 3136 3137 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask)) 3138 return 3139 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)), 3140 IntegerType::get(getContext(), BitWidth - LZ)), 3141 U->getType()); 3142 } 3143 break; 3144 3145 case Instruction::Or: 3146 // If the RHS of the Or is a constant, we may have something like: 3147 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop 3148 // optimizations will transparently handle this case. 3149 // 3150 // In order for this transformation to be safe, the LHS must be of the 3151 // form X*(2^n) and the Or constant must be less than 2^n. 
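// For example, if the LHS is X*8, its low three bits are known to be zero, so or'ing in any constant less than 8 is the same as adding it.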
3152 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3153 const SCEV *LHS = getSCEV(U->getOperand(0)); 3154 const APInt &CIVal = CI->getValue(); 3155 if (GetMinTrailingZeros(LHS) >= 3156 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) { 3157 // Build a plain add SCEV. 3158 const SCEV *S = getAddExpr(LHS, getSCEV(CI)); 3159 // If the LHS of the add was an addrec and it has no-wrap flags, 3160 // transfer the no-wrap flags, since an or won't introduce a wrap. 3161 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { 3162 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); 3163 if (OldAR->hasNoUnsignedWrap()) 3164 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true); 3165 if (OldAR->hasNoSignedWrap()) 3166 const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true); 3167 } 3168 return S; 3169 } 3170 } 3171 break; 3172 case Instruction::Xor: 3173 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) { 3174 // If the RHS of the xor is a signbit, then this is just an add. 3175 // Instcombine turns add of signbit into xor as a strength reduction step. 3176 if (CI->getValue().isSignBit()) 3177 return getAddExpr(getSCEV(U->getOperand(0)), 3178 getSCEV(U->getOperand(1))); 3179 3180 // If the RHS of xor is -1, then this is a not operation. 3181 if (CI->isAllOnesValue()) 3182 return getNotSCEV(getSCEV(U->getOperand(0))); 3183 3184 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask. 3185 // This is a variant of the check for xor with -1, and it handles 3186 // the case where instcombine has trimmed non-demanded bits out 3187 // of an xor with -1. 3188 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) 3189 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1))) 3190 if (BO->getOpcode() == Instruction::And && 3191 LCI->getValue() == CI->getValue()) 3192 if (const SCEVZeroExtendExpr *Z = 3193 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) { 3194 const Type *UTy = U->getType(); 3195 const SCEV *Z0 = Z->getOperand(); 3196 const Type *Z0Ty = Z0->getType(); 3197 unsigned Z0TySize = getTypeSizeInBits(Z0Ty); 3198 3199 // If C is a low-bits mask, the zero extend is serving to 3200 // mask off the high bits. Complement the operand and 3201 // re-apply the zext. 3202 if (APIntOps::isMask(Z0TySize, CI->getValue())) 3203 return getZeroExtendExpr(getNotSCEV(Z0), UTy); 3204 3205 // If C is a single bit, it may be in the sign-bit position 3206 // before the zero-extend. In this case, represent the xor 3207 // using an add, which is equivalent, and re-apply the zext. 3208 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize); 3209 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() && 3210 Trunc.isSignBit()) 3211 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)), 3212 UTy); 3213 } 3214 } 3215 break; 3216 3217 case Instruction::Shl: 3218 // Turn shift left of a constant amount into a multiply. 3219 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { 3220 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); 3221 Constant *X = ConstantInt::get(getContext(), 3222 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); 3223 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X)); 3224 } 3225 break; 3226 3227 case Instruction::LShr: 3228 // Turn logical shift right of a constant into an unsigned divide.
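// For example, X >>u 3 becomes X /u 8.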
3229 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) { 3230 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth(); 3231 Constant *X = ConstantInt::get(getContext(), 3232 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth))); 3233 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X)); 3234 } 3235 break; 3236 3237 case Instruction::AShr: 3238 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. 3239 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) 3240 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0))) 3241 if (L->getOpcode() == Instruction::Shl && 3242 L->getOperand(1) == U->getOperand(1)) { 3243 unsigned BitWidth = getTypeSizeInBits(U->getType()); 3244 uint64_t Amt = BitWidth - CI->getZExtValue(); 3245 if (Amt == BitWidth) 3246 return getSCEV(L->getOperand(0)); // shift by zero --> noop 3247 if (Amt > BitWidth) 3248 return getIntegerSCEV(0, U->getType()); // value is undefined 3249 return 3250 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)), 3251 IntegerType::get(getContext(), Amt)), 3252 U->getType()); 3253 } 3254 break; 3255 3256 case Instruction::Trunc: 3257 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType()); 3258 3259 case Instruction::ZExt: 3260 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 3261 3262 case Instruction::SExt: 3263 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType()); 3264 3265 case Instruction::BitCast: 3266 // BitCasts are no-op casts so we just eliminate the cast. 3267 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType())) 3268 return getSCEV(U->getOperand(0)); 3269 break; 3270 3271 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can 3272 // lead to pointer expressions which cannot safely be expanded to GEPs, 3273 // because ScalarEvolution doesn't respect the GEP aliasing rules when 3274 // simplifying integer expressions. 3275 3276 case Instruction::GetElementPtr: 3277 return createNodeForGEP(cast<GEPOperator>(U)); 3278 3279 case Instruction::PHI: 3280 return createNodeForPHI(cast<PHINode>(U)); 3281 3282 case Instruction::Select: 3283 // This could be a smax or umax that was lowered earlier. 3284 // Try to recover it. 3285 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) { 3286 Value *LHS = ICI->getOperand(0); 3287 Value *RHS = ICI->getOperand(1); 3288 switch (ICI->getPredicate()) { 3289 case ICmpInst::ICMP_SLT: 3290 case ICmpInst::ICMP_SLE: 3291 std::swap(LHS, RHS); 3292 // fall through 3293 case ICmpInst::ICMP_SGT: 3294 case ICmpInst::ICMP_SGE: 3295 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 3296 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS)); 3297 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 3298 return getSMinExpr(getSCEV(LHS), getSCEV(RHS)); 3299 break; 3300 case ICmpInst::ICMP_ULT: 3301 case ICmpInst::ICMP_ULE: 3302 std::swap(LHS, RHS); 3303 // fall through 3304 case ICmpInst::ICMP_UGT: 3305 case ICmpInst::ICMP_UGE: 3306 if (LHS == U->getOperand(1) && RHS == U->getOperand(2)) 3307 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS)); 3308 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1)) 3309 return getUMinExpr(getSCEV(LHS), getSCEV(RHS)); 3310 break; 3311 case ICmpInst::ICMP_NE: 3312 // n != 0 ? 
n : 1 -> umax(n, 1) 3313 if (LHS == U->getOperand(1) && 3314 isa<ConstantInt>(U->getOperand(2)) && 3315 cast<ConstantInt>(U->getOperand(2))->isOne() && 3316 isa<ConstantInt>(RHS) && 3317 cast<ConstantInt>(RHS)->isZero()) 3318 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2))); 3319 break; 3320 case ICmpInst::ICMP_EQ: 3321 // n == 0 ? 1 : n -> umax(n, 1) 3322 if (LHS == U->getOperand(2) && 3323 isa<ConstantInt>(U->getOperand(1)) && 3324 cast<ConstantInt>(U->getOperand(1))->isOne() && 3325 isa<ConstantInt>(RHS) && 3326 cast<ConstantInt>(RHS)->isZero()) 3327 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1))); 3328 break; 3329 default: 3330 break; 3331 } 3332 } 3333 3334 default: // We cannot analyze this expression. 3335 break; 3336 } 3337 3338 return getUnknown(V); 3339 } 3340 3341 3342 3343 //===----------------------------------------------------------------------===// 3344 // Iteration Count Computation Code 3345 // 3346 3347 /// getBackedgeTakenCount - If the specified loop has a predictable 3348 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute 3349 /// object. The backedge-taken count is the number of times the loop header 3350 /// will be branched to from within the loop. This is one less than the 3351 /// trip count of the loop, since it doesn't count the first iteration, 3352 /// when the header is branched to from outside the loop. 3353 /// 3354 /// Note that it is not valid to call this method on a loop without a 3355 /// loop-invariant backedge-taken count (see 3356 /// hasLoopInvariantBackedgeTakenCount). 3357 /// 3358 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) { 3359 return getBackedgeTakenInfo(L).Exact; 3360 } 3361 3362 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except 3363 /// return the least SCEV value that is known never to be less than the 3364 /// actual backedge taken count. 3365 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) { 3366 return getBackedgeTakenInfo(L).Max; 3367 } 3368 3369 /// PushLoopPHIs - Push PHI nodes in the header of the given loop 3370 /// onto the given Worklist. 3371 static void 3372 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) { 3373 BasicBlock *Header = L->getHeader(); 3374 3375 // Push all Loop-header PHIs onto the Worklist stack. 3376 for (BasicBlock::iterator I = Header->begin(); 3377 PHINode *PN = dyn_cast<PHINode>(I); ++I) 3378 Worklist.push_back(PN); 3379 } 3380 3381 const ScalarEvolution::BackedgeTakenInfo & 3382 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { 3383 // Initially insert a CouldNotCompute for this loop. If the insertion 3384 // succeeds, procede to actually compute a backedge-taken count and 3385 // update the value. The temporary CouldNotCompute value tells SCEV 3386 // code elsewhere that it shouldn't attempt to request a new 3387 // backedge-taken count, which could result in infinite recursion. 3388 std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = 3389 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); 3390 if (Pair.second) { 3391 BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L); 3392 if (BECount.Exact != getCouldNotCompute()) { 3393 assert(BECount.Exact->isLoopInvariant(L) && 3394 BECount.Max->isLoopInvariant(L) && 3395 "Computed backedge-taken count isn't loop invariant for loop!"); 3396 ++NumTripCountsComputed; 3397 3398 // Update the value in the map. 
3399 Pair.first->second = BECount; 3400 } else { 3401 if (BECount.Max != getCouldNotCompute()) 3402 // Update the value in the map. 3403 Pair.first->second = BECount; 3404 if (isa<PHINode>(L->getHeader()->begin())) 3405 // Only count loops that have phi nodes as not being computable. 3406 ++NumTripCountsNotComputed; 3407 } 3408 3409 // Now that we know more about the trip count for this loop, forget any 3410 // existing SCEV values for PHI nodes in this loop since they are only 3411 // conservative estimates made without the benefit of trip count 3412 // information. This is similar to the code in forgetLoop, except that 3413 // it handles SCEVUnknown PHI nodes specially. 3414 if (BECount.hasAnyInfo()) { 3415 SmallVector<Instruction *, 16> Worklist; 3416 PushLoopPHIs(L, Worklist); 3417 3418 SmallPtrSet<Instruction *, 8> Visited; 3419 while (!Worklist.empty()) { 3420 Instruction *I = Worklist.pop_back_val(); 3421 if (!Visited.insert(I)) continue; 3422 3423 std::map<SCEVCallbackVH, const SCEV *>::iterator It = 3424 Scalars.find(static_cast<Value *>(I)); 3425 if (It != Scalars.end()) { 3426 // SCEVUnknown for a PHI either means that it has an unrecognized 3427 // structure, or it's a PHI that's in the process of being computed 3428 // by createNodeForPHI. In the former case, additional loop trip 3429 // count information isn't going to change anything. In the latter 3430 // case, createNodeForPHI will perform the necessary updates on its 3431 // own when it gets to that point. 3432 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) { 3433 ValuesAtScopes.erase(It->second); 3434 Scalars.erase(It); 3435 } 3436 if (PHINode *PN = dyn_cast<PHINode>(I)) 3437 ConstantEvolutionLoopExitValue.erase(PN); 3438 } 3439 3440 PushDefUseChildren(I, Worklist); 3441 } 3442 } 3443 } 3444 return Pair.first->second; 3445 } 3446 3447 /// forgetLoop - This method should be called by the client when it has 3448 /// changed a loop in a way that may affect ScalarEvolution's ability to 3449 /// compute a trip count, or if the loop is deleted. 3450 void ScalarEvolution::forgetLoop(const Loop *L) { 3451 // Drop any stored trip count value. 3452 BackedgeTakenCounts.erase(L); 3453 3454 // Drop information about expressions based on loop-header PHIs. 3455 SmallVector<Instruction *, 16> Worklist; 3456 PushLoopPHIs(L, Worklist); 3457 3458 SmallPtrSet<Instruction *, 8> Visited; 3459 while (!Worklist.empty()) { 3460 Instruction *I = Worklist.pop_back_val(); 3461 if (!Visited.insert(I)) continue; 3462 3463 std::map<SCEVCallbackVH, const SCEV *>::iterator It = 3464 Scalars.find(static_cast<Value *>(I)); 3465 if (It != Scalars.end()) { 3466 ValuesAtScopes.erase(It->second); 3467 Scalars.erase(It); 3468 if (PHINode *PN = dyn_cast<PHINode>(I)) 3469 ConstantEvolutionLoopExitValue.erase(PN); 3470 } 3471 3472 PushDefUseChildren(I, Worklist); 3473 } 3474 } 3475 3476 /// ComputeBackedgeTakenCount - Compute the number of times the backedge 3477 /// of the specified loop will execute. 3478 ScalarEvolution::BackedgeTakenInfo 3479 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { 3480 SmallVector<BasicBlock *, 8> ExitingBlocks; 3481 L->getExitingBlocks(ExitingBlocks); 3482 3483 // Examine all exits and pick the most conservative values.
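// The exact count is only usable if every exit yields a computable exact count, in which case it is the unsigned minimum over all exits; the max count is combined the same way over whichever exits provide one.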
3484 const SCEV *BECount = getCouldNotCompute(); 3485 const SCEV *MaxBECount = getCouldNotCompute(); 3486 bool CouldNotComputeBECount = false; 3487 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 3488 BackedgeTakenInfo NewBTI = 3489 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]); 3490 3491 if (NewBTI.Exact == getCouldNotCompute()) { 3492 // We couldn't compute an exact value for this exit, so 3493 // we won't be able to compute an exact value for the loop. 3494 CouldNotComputeBECount = true; 3495 BECount = getCouldNotCompute(); 3496 } else if (!CouldNotComputeBECount) { 3497 if (BECount == getCouldNotCompute()) 3498 BECount = NewBTI.Exact; 3499 else 3500 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact); 3501 } 3502 if (MaxBECount == getCouldNotCompute()) 3503 MaxBECount = NewBTI.Max; 3504 else if (NewBTI.Max != getCouldNotCompute()) 3505 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max); 3506 } 3507 3508 return BackedgeTakenInfo(BECount, MaxBECount); 3509 } 3510 3511 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge 3512 /// of the specified loop will execute if it exits via the specified block. 3513 ScalarEvolution::BackedgeTakenInfo 3514 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L, 3515 BasicBlock *ExitingBlock) { 3516 3517 // Okay, we've chosen an exiting block. See what condition causes us to 3518 // exit at this block. 3519 // 3520 // FIXME: we should be able to handle switch instructions (with a single exit) 3521 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 3522 if (ExitBr == 0) return getCouldNotCompute(); 3523 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!"); 3524 3525 // At this point, we know we have a conditional branch that determines whether 3526 // the loop is exited. However, we don't know if the branch is executed each 3527 // time through the loop. If not, then the execution count of the branch will 3528 // not be equal to the trip count of the loop. 3529 // 3530 // Currently we check for this by checking to see if the Exit branch goes to 3531 // the loop header. If so, we know it will always execute the same number of 3532 // times as the loop. We also handle the case where the exit block *is* the 3533 // loop header. This is common for un-rotated loops. 3534 // 3535 // If both of those tests fail, walk up the unique predecessor chain to the 3536 // header, stopping if there is an edge that doesn't exit the loop. If the 3537 // header is reached, the execution count of the branch will be equal to the 3538 // trip count of the loop. 3539 // 3540 // More extensive analysis could be done to handle more cases here. 3541 // 3542 if (ExitBr->getSuccessor(0) != L->getHeader() && 3543 ExitBr->getSuccessor(1) != L->getHeader() && 3544 ExitBr->getParent() != L->getHeader()) { 3545 // The simple checks failed, try climbing the unique predecessor chain 3546 // up to the header. 3547 bool Ok = false; 3548 for (BasicBlock *BB = ExitBr->getParent(); BB; ) { 3549 BasicBlock *Pred = BB->getUniquePredecessor(); 3550 if (!Pred) 3551 return getCouldNotCompute(); 3552 TerminatorInst *PredTerm = Pred->getTerminator(); 3553 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) { 3554 BasicBlock *PredSucc = PredTerm->getSuccessor(i); 3555 if (PredSucc == BB) 3556 continue; 3557 // If the predecessor has a successor that isn't BB and isn't 3558 // outside the loop, assume the worst. 
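// (Such an in-loop side edge means the exiting branch might be bypassed on some iterations, so its execution count need not equal the trip count.)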
3559 if (L->contains(PredSucc)) 3560 return getCouldNotCompute(); 3561 } 3562 if (Pred == L->getHeader()) { 3563 Ok = true; 3564 break; 3565 } 3566 BB = Pred; 3567 } 3568 if (!Ok) 3569 return getCouldNotCompute(); 3570 } 3571 3572 // Procede to the next level to examine the exit condition expression. 3573 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(), 3574 ExitBr->getSuccessor(0), 3575 ExitBr->getSuccessor(1)); 3576 } 3577 3578 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the 3579 /// backedge of the specified loop will execute if its exit condition 3580 /// were a conditional branch of ExitCond, TBB, and FBB. 3581 ScalarEvolution::BackedgeTakenInfo 3582 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L, 3583 Value *ExitCond, 3584 BasicBlock *TBB, 3585 BasicBlock *FBB) { 3586 // Check if the controlling expression for this loop is an And or Or. 3587 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { 3588 if (BO->getOpcode() == Instruction::And) { 3589 // Recurse on the operands of the and. 3590 BackedgeTakenInfo BTI0 = 3591 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3592 BackedgeTakenInfo BTI1 = 3593 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3594 const SCEV *BECount = getCouldNotCompute(); 3595 const SCEV *MaxBECount = getCouldNotCompute(); 3596 if (L->contains(TBB)) { 3597 // Both conditions must be true for the loop to continue executing. 3598 // Choose the less conservative count. 3599 if (BTI0.Exact == getCouldNotCompute() || 3600 BTI1.Exact == getCouldNotCompute()) 3601 BECount = getCouldNotCompute(); 3602 else 3603 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3604 if (BTI0.Max == getCouldNotCompute()) 3605 MaxBECount = BTI1.Max; 3606 else if (BTI1.Max == getCouldNotCompute()) 3607 MaxBECount = BTI0.Max; 3608 else 3609 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3610 } else { 3611 // Both conditions must be true for the loop to exit. 3612 assert(L->contains(FBB) && "Loop block has no successor in loop!"); 3613 if (BTI0.Exact != getCouldNotCompute() && 3614 BTI1.Exact != getCouldNotCompute()) 3615 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3616 if (BTI0.Max != getCouldNotCompute() && 3617 BTI1.Max != getCouldNotCompute()) 3618 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max); 3619 } 3620 3621 return BackedgeTakenInfo(BECount, MaxBECount); 3622 } 3623 if (BO->getOpcode() == Instruction::Or) { 3624 // Recurse on the operands of the or. 3625 BackedgeTakenInfo BTI0 = 3626 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB); 3627 BackedgeTakenInfo BTI1 = 3628 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB); 3629 const SCEV *BECount = getCouldNotCompute(); 3630 const SCEV *MaxBECount = getCouldNotCompute(); 3631 if (L->contains(FBB)) { 3632 // Both conditions must be false for the loop to continue executing. 3633 // Choose the less conservative count. 3634 if (BTI0.Exact == getCouldNotCompute() || 3635 BTI1.Exact == getCouldNotCompute()) 3636 BECount = getCouldNotCompute(); 3637 else 3638 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3639 if (BTI0.Max == getCouldNotCompute()) 3640 MaxBECount = BTI1.Max; 3641 else if (BTI1.Max == getCouldNotCompute()) 3642 MaxBECount = BTI0.Max; 3643 else 3644 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max); 3645 } else { 3646 // Both conditions must be false for the loop to exit. 
3647 assert(L->contains(TBB) && "Loop block has no successor in loop!"); 3648 if (BTI0.Exact != getCouldNotCompute() && 3649 BTI1.Exact != getCouldNotCompute()) 3650 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact); 3651 if (BTI0.Max != getCouldNotCompute() && 3652 BTI1.Max != getCouldNotCompute()) 3653 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max); 3654 } 3655 3656 return BackedgeTakenInfo(BECount, MaxBECount); 3657 } 3658 } 3659 3660 // With an icmp, it may be feasible to compute an exact backedge-taken count. 3661 // Procede to the next level to examine the icmp. 3662 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) 3663 return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB); 3664 3665 // If it's not an integer or pointer comparison then compute it the hard way. 3666 return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3667 } 3668 3669 /// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the 3670 /// backedge of the specified loop will execute if its exit condition 3671 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB. 3672 ScalarEvolution::BackedgeTakenInfo 3673 ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, 3674 ICmpInst *ExitCond, 3675 BasicBlock *TBB, 3676 BasicBlock *FBB) { 3677 3678 // If the condition was exit on true, convert the condition to exit on false 3679 ICmpInst::Predicate Cond; 3680 if (!L->contains(FBB)) 3681 Cond = ExitCond->getPredicate(); 3682 else 3683 Cond = ExitCond->getInversePredicate(); 3684 3685 // Handle common loops like: for (X = "string"; *X; ++X) 3686 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) 3687 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { 3688 const SCEV *ItCnt = 3689 ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond); 3690 if (!isa<SCEVCouldNotCompute>(ItCnt)) { 3691 unsigned BitWidth = getTypeSizeInBits(ItCnt->getType()); 3692 return BackedgeTakenInfo(ItCnt, 3693 isa<SCEVConstant>(ItCnt) ? ItCnt : 3694 getConstant(APInt::getMaxValue(BitWidth)-1)); 3695 } 3696 } 3697 3698 const SCEV *LHS = getSCEV(ExitCond->getOperand(0)); 3699 const SCEV *RHS = getSCEV(ExitCond->getOperand(1)); 3700 3701 // Try to evaluate any dependencies out of the loop. 3702 LHS = getSCEVAtScope(LHS, L); 3703 RHS = getSCEVAtScope(RHS, L); 3704 3705 // At this point, we would like to compute how many iterations of the 3706 // loop the predicate will return true for these inputs. 3707 if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) { 3708 // If there is a loop-invariant, force it into the RHS. 3709 std::swap(LHS, RHS); 3710 Cond = ICmpInst::getSwappedPredicate(Cond); 3711 } 3712 3713 // If we have a comparison of a chrec against a constant, try to use value 3714 // ranges to answer this query. 3715 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) 3716 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) 3717 if (AddRec->getLoop() == L) { 3718 // Form the constant range. 
3719 ConstantRange CompRange( 3720 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue())); 3721 3722 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this); 3723 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret; 3724 } 3725 3726 switch (Cond) { 3727 case ICmpInst::ICMP_NE: { // while (X != Y) 3728 // Convert to: while (X-Y != 0) 3729 const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L); 3730 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3731 break; 3732 } 3733 case ICmpInst::ICMP_EQ: { // while (X == Y) 3734 // Convert to: while (X-Y == 0) 3735 const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L); 3736 if (!isa<SCEVCouldNotCompute>(TC)) return TC; 3737 break; 3738 } 3739 case ICmpInst::ICMP_SLT: { 3740 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true); 3741 if (BTI.hasAnyInfo()) return BTI; 3742 break; 3743 } 3744 case ICmpInst::ICMP_SGT: { 3745 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3746 getNotSCEV(RHS), L, true); 3747 if (BTI.hasAnyInfo()) return BTI; 3748 break; 3749 } 3750 case ICmpInst::ICMP_ULT: { 3751 BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false); 3752 if (BTI.hasAnyInfo()) return BTI; 3753 break; 3754 } 3755 case ICmpInst::ICMP_UGT: { 3756 BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS), 3757 getNotSCEV(RHS), L, false); 3758 if (BTI.hasAnyInfo()) return BTI; 3759 break; 3760 } 3761 default: 3762 #if 0 3763 dbgs() << "ComputeBackedgeTakenCount "; 3764 if (ExitCond->getOperand(0)->getType()->isUnsigned()) 3765 dbgs() << "[unsigned] "; 3766 dbgs() << *LHS << " " 3767 << Instruction::getOpcodeName(Instruction::ICmp) 3768 << " " << *RHS << "\n"; 3769 #endif 3770 break; 3771 } 3772 return 3773 ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB)); 3774 } 3775 3776 static ConstantInt * 3777 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, 3778 ScalarEvolution &SE) { 3779 const SCEV *InVal = SE.getConstant(C); 3780 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE); 3781 assert(isa<SCEVConstant>(Val) && 3782 "Evaluation of SCEV at constant didn't fold correctly?"); 3783 return cast<SCEVConstant>(Val)->getValue(); 3784 } 3785 3786 /// GetAddressedElementFromGlobal - Given a global variable with an initializer 3787 /// and a GEP expression (missing the pointer index) indexing into it, return 3788 /// the addressed element of the initializer or null if the index expression is 3789 /// invalid. 
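/// For example, for a GEP of the form (GV, 0, 3) into an array-typed global,
/// Indices holds just {3} and the element at position 3 of the initializer is
/// returned.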
3790 static Constant * 3791 GetAddressedElementFromGlobal(GlobalVariable *GV, 3792 const std::vector<ConstantInt*> &Indices) { 3793 Constant *Init = GV->getInitializer(); 3794 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 3795 uint64_t Idx = Indices[i]->getZExtValue(); 3796 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) { 3797 assert(Idx < CS->getNumOperands() && "Bad struct index!"); 3798 Init = cast<Constant>(CS->getOperand(Idx)); 3799 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) { 3800 if (Idx >= CA->getNumOperands()) return 0; // Bogus program 3801 Init = cast<Constant>(CA->getOperand(Idx)); 3802 } else if (isa<ConstantAggregateZero>(Init)) { 3803 if (const StructType *STy = dyn_cast<StructType>(Init->getType())) { 3804 assert(Idx < STy->getNumElements() && "Bad struct index!"); 3805 Init = Constant::getNullValue(STy->getElementType(Idx)); 3806 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) { 3807 if (Idx >= ATy->getNumElements()) return 0; // Bogus program 3808 Init = Constant::getNullValue(ATy->getElementType()); 3809 } else { 3810 llvm_unreachable("Unknown constant aggregate type!"); 3811 } 3812 return 0; 3813 } else { 3814 return 0; // Unknown initializer type 3815 } 3816 } 3817 return Init; 3818 } 3819 3820 /// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of 3821 /// 'icmp op load X, cst', try to see if we can compute the backedge 3822 /// execution count. 3823 const SCEV * 3824 ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( 3825 LoadInst *LI, 3826 Constant *RHS, 3827 const Loop *L, 3828 ICmpInst::Predicate predicate) { 3829 if (LI->isVolatile()) return getCouldNotCompute(); 3830 3831 // Check to see if the loaded pointer is a getelementptr of a global. 3832 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); 3833 if (!GEP) return getCouldNotCompute(); 3834 3835 // Make sure that it is really a constant global we are gepping, with an 3836 // initializer, and make sure the first IDX is really 0. 3837 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); 3838 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || 3839 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) || 3840 !cast<Constant>(GEP->getOperand(1))->isNullValue()) 3841 return getCouldNotCompute(); 3842 3843 // Okay, we allow one non-constant index into the GEP instruction. 3844 Value *VarIdx = 0; 3845 std::vector<ConstantInt*> Indexes; 3846 unsigned VarIdxNum = 0; 3847 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) 3848 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 3849 Indexes.push_back(CI); 3850 } else if (!isa<ConstantInt>(GEP->getOperand(i))) { 3851 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. 3852 VarIdx = GEP->getOperand(i); 3853 VarIdxNum = i-2; 3854 Indexes.push_back(0); 3855 } 3856 3857 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant. 3858 // Check to see if X is a loop variant variable value now. 3859 const SCEV *Idx = getSCEV(VarIdx); 3860 Idx = getSCEVAtScope(Idx, L); 3861 3862 // We can only recognize very limited forms of loop index expressions, in 3863 // particular, only affine AddRec's like {C1,+,C2}. 
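// (e.g. an index that starts at 0 and advances by 1 each iteration is the chrec {0,+,1}).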
3864 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); 3865 if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) || 3866 !isa<SCEVConstant>(IdxExpr->getOperand(0)) || 3867 !isa<SCEVConstant>(IdxExpr->getOperand(1))) 3868 return getCouldNotCompute(); 3869 3870 unsigned MaxSteps = MaxBruteForceIterations; 3871 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) { 3872 ConstantInt *ItCst = ConstantInt::get( 3873 cast<IntegerType>(IdxExpr->getType()), IterationNum); 3874 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this); 3875 3876 // Form the GEP offset. 3877 Indexes[VarIdxNum] = Val; 3878 3879 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes); 3880 if (Result == 0) break; // Cannot compute! 3881 3882 // Evaluate the condition for this iteration. 3883 Result = ConstantExpr::getICmp(predicate, Result, RHS); 3884 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure 3885 if (cast<ConstantInt>(Result)->getValue().isMinValue()) { 3886 #if 0 3887 dbgs() << "\n***\n*** Computed loop count " << *ItCst 3888 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() 3889 << "***\n"; 3890 #endif 3891 ++NumArrayLenItCounts; 3892 return getConstant(ItCst); // Found terminating iteration! 3893 } 3894 } 3895 return getCouldNotCompute(); 3896 } 3897 3898 3899 /// CanConstantFold - Return true if we can constant fold an instruction of the 3900 /// specified type, assuming that all operands were constants. 3901 static bool CanConstantFold(const Instruction *I) { 3902 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) || 3903 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I)) 3904 return true; 3905 3906 if (const CallInst *CI = dyn_cast<CallInst>(I)) 3907 if (const Function *F = CI->getCalledFunction()) 3908 return canConstantFoldCallTo(F); 3909 return false; 3910 } 3911 3912 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node 3913 /// in the loop that V is derived from. We allow arbitrary operations along the 3914 /// way, but the operands of an operation must either be constants or a value 3915 /// derived from a constant PHI. If this expression does not fit with these 3916 /// constraints, return null. 3917 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { 3918 // If this is not an instruction, or if this is an instruction outside of the 3919 // loop, it can't be derived from a loop PHI. 3920 Instruction *I = dyn_cast<Instruction>(V); 3921 if (I == 0 || !L->contains(I)) return 0; 3922 3923 if (PHINode *PN = dyn_cast<PHINode>(I)) { 3924 if (L->getHeader() == I->getParent()) 3925 return PN; 3926 else 3927 // We don't currently keep track of the control flow needed to evaluate 3928 // PHIs, so we cannot handle PHIs inside of loops. 3929 return 0; 3930 } 3931 3932 // If we won't be able to constant fold this expression even if the operands 3933 // are constants, return early. 3934 if (!CanConstantFold(I)) return 0; 3935 3936 // Otherwise, we can evaluate this instruction if all of its operands are 3937 // constant or derived from a PHI node themselves. 3938 PHINode *PHI = 0; 3939 for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op) 3940 if (!(isa<Constant>(I->getOperand(Op)) || 3941 isa<GlobalValue>(I->getOperand(Op)))) { 3942 PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L); 3943 if (P == 0) return 0; // Not evolving from PHI 3944 if (PHI == 0) 3945 PHI = P; 3946 else if (PHI != P) 3947 return 0; // Evolving from multiple different PHIs. 
3948 } 3949 3950 // This is a expression evolving from a constant PHI! 3951 return PHI; 3952 } 3953 3954 /// EvaluateExpression - Given an expression that passes the 3955 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node 3956 /// in the loop has the value PHIVal. If we can't fold this expression for some 3957 /// reason, return null. 3958 static Constant *EvaluateExpression(Value *V, Constant *PHIVal, 3959 const TargetData *TD) { 3960 if (isa<PHINode>(V)) return PHIVal; 3961 if (Constant *C = dyn_cast<Constant>(V)) return C; 3962 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV; 3963 Instruction *I = cast<Instruction>(V); 3964 3965 std::vector<Constant*> Operands; 3966 Operands.resize(I->getNumOperands()); 3967 3968 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3969 Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD); 3970 if (Operands[i] == 0) return 0; 3971 } 3972 3973 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 3974 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], 3975 Operands[1], TD); 3976 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), 3977 &Operands[0], Operands.size(), TD); 3978 } 3979 3980 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is 3981 /// in the header of its containing loop, we know the loop executes a 3982 /// constant number of times, and the PHI node is just a recurrence 3983 /// involving constants, fold it. 3984 Constant * 3985 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, 3986 const APInt &BEs, 3987 const Loop *L) { 3988 std::map<PHINode*, Constant*>::iterator I = 3989 ConstantEvolutionLoopExitValue.find(PN); 3990 if (I != ConstantEvolutionLoopExitValue.end()) 3991 return I->second; 3992 3993 if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations))) 3994 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it. 3995 3996 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN]; 3997 3998 // Since the loop is canonicalized, the PHI node must have two entries. One 3999 // entry must be a constant (coming in from outside of the loop), and the 4000 // second must be derived from the same PHI. 4001 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4002 Constant *StartCST = 4003 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 4004 if (StartCST == 0) 4005 return RetVal = 0; // Must be a constant. 4006 4007 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4008 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L); 4009 if (PN2 != PN) 4010 return RetVal = 0; // Not derived from same PHI. 4011 4012 // Execute the loop symbolically to determine the exit value. 4013 if (BEs.getActiveBits() >= 32) 4014 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it! 4015 4016 unsigned NumIterations = BEs.getZExtValue(); // must be in range 4017 unsigned IterationNum = 0; 4018 for (Constant *PHIVal = StartCST; ; ++IterationNum) { 4019 if (IterationNum == NumIterations) 4020 return RetVal = PHIVal; // Got exit value! 4021 4022 // Compute the value of the PHI node for the next iteration. 4023 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); 4024 if (NextPHI == PHIVal) 4025 return RetVal = NextPHI; // Stopped evolving! 4026 if (NextPHI == 0) 4027 return 0; // Couldn't evaluate! 
4028 PHIVal = NextPHI; 4029 } 4030 } 4031 4032 /// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a 4033 /// constant number of times (the condition evolves only from constants), 4034 /// try to evaluate a few iterations of the loop until the exit 4035 /// condition gets a value of ExitWhen (true or false). If we cannot 4036 /// evaluate the trip count of the loop, return getCouldNotCompute(). 4037 const SCEV * 4038 ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L, 4039 Value *Cond, 4040 bool ExitWhen) { 4041 PHINode *PN = getConstantEvolvingPHI(Cond, L); 4042 if (PN == 0) return getCouldNotCompute(); 4043 4044 // Since the loop is canonicalized, the PHI node must have two entries. One 4045 // entry must be a constant (coming in from outside of the loop), and the 4046 // second must be derived from the same PHI. 4047 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1)); 4048 Constant *StartCST = 4049 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge)); 4050 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant. 4051 4052 Value *BEValue = PN->getIncomingValue(SecondIsBackedge); 4053 PHINode *PN2 = getConstantEvolvingPHI(BEValue, L); 4054 if (PN2 != PN) return getCouldNotCompute(); // Not derived from same PHI. 4055 4056 // Okay, we found a PHI node that defines the trip count of this loop. Execute 4057 // the loop symbolically to determine when the condition gets a value of 4058 // "ExitWhen". 4059 unsigned IterationNum = 0; 4060 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. 4061 for (Constant *PHIVal = StartCST; 4062 IterationNum != MaxIterations; ++IterationNum) { 4063 ConstantInt *CondVal = 4064 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD)); 4065 4066 // Couldn't symbolically evaluate. 4067 if (!CondVal) return getCouldNotCompute(); 4068 4069 if (CondVal->getValue() == uint64_t(ExitWhen)) { 4070 ++NumBruteForceTripCountsComputed; 4071 return getConstant(Type::getInt32Ty(getContext()), IterationNum); 4072 } 4073 4074 // Compute the value of the PHI node for the next iteration. 4075 Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD); 4076 if (NextPHI == 0 || NextPHI == PHIVal) 4077 return getCouldNotCompute(); // Couldn't evaluate or not making progress... 4078 PHIVal = NextPHI; 4079 } 4080 4081 // Too many iterations were needed to evaluate. 4082 return getCouldNotCompute(); 4083 } 4084 4085 /// getSCEVAtScope - Return a SCEV expression for the specified value 4086 /// at the specified scope in the program. The L value specifies a loop 4087 /// nest to evaluate the expression at, where null is the top-level scope, or a 4088 /// specified loop is the loop immediately enclosing the point of evaluation. 4089 /// 4090 /// This method can be used to compute the exit value for a variable defined 4091 /// in a loop by querying what the value will hold in the parent loop. 4092 /// 4093 /// In the case that a relevant loop exit value cannot be computed, the 4094 /// original value V is returned. 4095 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) { 4096 // Check to see if we've folded this expression at this loop before. 4097 std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V]; 4098 std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair = 4099 Values.insert(std::make_pair(L, static_cast<const SCEV *>(0))); 4100 if (!Pair.second) 4101 return Pair.first->second ? Pair.first->second : V; 4102 4103 // Otherwise compute it.
4104 const SCEV *C = computeSCEVAtScope(V, L); 4105 ValuesAtScopes[V][L] = C; 4106 return C; 4107 } 4108 4109 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { 4110 if (isa<SCEVConstant>(V)) return V; 4111 4112 // If this instruction is evolved from a constant-evolving PHI, compute the 4113 // exit value from the loop without using SCEVs. 4114 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { 4115 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { 4116 const Loop *LI = (*this->LI)[I->getParent()]; 4117 if (LI && LI->getParentLoop() == L) // Looking for loop exit value. 4118 if (PHINode *PN = dyn_cast<PHINode>(I)) 4119 if (PN->getParent() == LI->getHeader()) { 4120 // Okay, there is no closed form solution for the PHI node. Check 4121 // to see if the loop that contains it has a known backedge-taken 4122 // count. If so, we may be able to force computation of the exit 4123 // value. 4124 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); 4125 if (const SCEVConstant *BTCC = 4126 dyn_cast<SCEVConstant>(BackedgeTakenCount)) { 4127 // Okay, we know how many times the containing loop executes. If 4128 // this is a constant evolving PHI node, get the final value at 4129 // the specified iteration number. 4130 Constant *RV = getConstantEvolutionLoopExitValue(PN, 4131 BTCC->getValue()->getValue(), 4132 LI); 4133 if (RV) return getSCEV(RV); 4134 } 4135 } 4136 4137 // Okay, this is an expression that we cannot symbolically evaluate 4138 // into a SCEV. Check to see if it's possible to symbolically evaluate 4139 // the arguments into constants, and if so, try to constant propagate the 4140 // result. This is particularly useful for computing loop exit values. 4141 if (CanConstantFold(I)) { 4142 std::vector<Constant*> Operands; 4143 Operands.reserve(I->getNumOperands()); 4144 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 4145 Value *Op = I->getOperand(i); 4146 if (Constant *C = dyn_cast<Constant>(Op)) { 4147 Operands.push_back(C); 4148 } else { 4149 // If any of the operands is non-constant and if they are 4150 // non-integer and non-pointer, don't even try to analyze them 4151 // with scev techniques. 4152 if (!isSCEVable(Op->getType())) 4153 return V; 4154 4155 const SCEV *OpV = getSCEVAtScope(Op, L); 4156 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) { 4157 Constant *C = SC->getValue(); 4158 if (C->getType() != Op->getType()) 4159 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4160 Op->getType(), 4161 false), 4162 C, Op->getType()); 4163 Operands.push_back(C); 4164 } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) { 4165 if (Constant *C = dyn_cast<Constant>(SU->getValue())) { 4166 if (C->getType() != Op->getType()) 4167 C = 4168 ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4169 Op->getType(), 4170 false), 4171 C, Op->getType()); 4172 Operands.push_back(C); 4173 } else 4174 return V; 4175 } else { 4176 return V; 4177 } 4178 } 4179 } 4180 4181 Constant *C; 4182 if (const CmpInst *CI = dyn_cast<CmpInst>(I)) 4183 C = ConstantFoldCompareInstOperands(CI->getPredicate(), 4184 Operands[0], Operands[1], TD); 4185 else 4186 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), 4187 &Operands[0], Operands.size(), TD); 4188 return getSCEV(C); 4189 } 4190 } 4191 4192 // This is some other type of SCEVUnknown, just return it. 
4193 return V; 4194 } 4195 4196 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { 4197 // Avoid performing the look-up in the common case where the specified 4198 // expression has no loop-variant portions. 4199 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { 4200 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4201 if (OpAtScope != Comm->getOperand(i)) { 4202 // Okay, at least one of these operands is loop variant but might be 4203 // foldable. Build a new instance of the folded commutative expression. 4204 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(), 4205 Comm->op_begin()+i); 4206 NewOps.push_back(OpAtScope); 4207 4208 for (++i; i != e; ++i) { 4209 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L); 4210 NewOps.push_back(OpAtScope); 4211 } 4212 if (isa<SCEVAddExpr>(Comm)) 4213 return getAddExpr(NewOps); 4214 if (isa<SCEVMulExpr>(Comm)) 4215 return getMulExpr(NewOps); 4216 if (isa<SCEVSMaxExpr>(Comm)) 4217 return getSMaxExpr(NewOps); 4218 if (isa<SCEVUMaxExpr>(Comm)) 4219 return getUMaxExpr(NewOps); 4220 llvm_unreachable("Unknown commutative SCEV type!"); 4221 } 4222 } 4223 // If we got here, all operands are loop invariant. 4224 return Comm; 4225 } 4226 4227 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { 4228 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); 4229 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); 4230 if (LHS == Div->getLHS() && RHS == Div->getRHS()) 4231 return Div; // must be loop invariant 4232 return getUDivExpr(LHS, RHS); 4233 } 4234 4235 // If this is a loop recurrence for a loop that does not contain L, then we 4236 // are dealing with the final value computed by the loop. 4237 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { 4238 if (!L || !AddRec->getLoop()->contains(L)) { 4239 // To evaluate this recurrence, we need to know how many times the AddRec 4240 // loop iterates. Compute this now. 4241 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); 4242 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec; 4243 4244 // Then, evaluate the AddRec. 4245 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this); 4246 } 4247 return AddRec; 4248 } 4249 4250 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { 4251 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4252 if (Op == Cast->getOperand()) 4253 return Cast; // must be loop invariant 4254 return getZeroExtendExpr(Op, Cast->getType()); 4255 } 4256 4257 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { 4258 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4259 if (Op == Cast->getOperand()) 4260 return Cast; // must be loop invariant 4261 return getSignExtendExpr(Op, Cast->getType()); 4262 } 4263 4264 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { 4265 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); 4266 if (Op == Cast->getOperand()) 4267 return Cast; // must be loop invariant 4268 return getTruncateExpr(Op, Cast->getType()); 4269 } 4270 4271 llvm_unreachable("Unknown SCEV type!"); 4272 return 0; 4273 } 4274 4275 /// getSCEVAtScope - This is a convenience function which does 4276 /// getSCEVAtScope(getSCEV(V), L). 
4277 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) { 4278 return getSCEVAtScope(getSCEV(V), L); 4279 } 4280 4281 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the 4282 /// following equation: 4283 /// 4284 /// A * X = B (mod N) 4285 /// 4286 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of 4287 /// A and B isn't important. 4288 /// 4289 /// If the equation does not have a solution, SCEVCouldNotCompute is returned. 4290 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B, 4291 ScalarEvolution &SE) { 4292 uint32_t BW = A.getBitWidth(); 4293 assert(BW == B.getBitWidth() && "Bit widths must be the same."); 4294 assert(A != 0 && "A must be non-zero."); 4295 4296 // 1. D = gcd(A, N) 4297 // 4298 // The gcd of A and N may have only one prime factor: 2. The number of 4299 // trailing zeros in A is its multiplicity 4300 uint32_t Mult2 = A.countTrailingZeros(); 4301 // D = 2^Mult2 4302 4303 // 2. Check if B is divisible by D. 4304 // 4305 // B is divisible by D if and only if the multiplicity of prime factor 2 for B 4306 // is not less than multiplicity of this prime factor for D. 4307 if (B.countTrailingZeros() < Mult2) 4308 return SE.getCouldNotCompute(); 4309 4310 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic 4311 // modulo (N / D). 4312 // 4313 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this 4314 // bit width during computations. 4315 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D 4316 APInt Mod(BW + 1, 0); 4317 Mod.set(BW - Mult2); // Mod = N / D 4318 APInt I = AD.multiplicativeInverse(Mod); 4319 4320 // 4. Compute the minimum unsigned root of the equation: 4321 // I * (B / D) mod (N / D) 4322 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod); 4323 4324 // The result is guaranteed to be less than 2^BW so we may truncate it to BW 4325 // bits. 4326 return SE.getConstant(Result.trunc(BW)); 4327 } 4328 4329 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the 4330 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which 4331 /// might be the same) or two SCEVCouldNotCompute objects. 4332 /// 4333 static std::pair<const SCEV *,const SCEV *> 4334 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { 4335 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); 4336 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); 4337 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); 4338 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); 4339 4340 // We currently can only solve this if the coefficients are constants. 4341 if (!LC || !MC || !NC) { 4342 const SCEV *CNC = SE.getCouldNotCompute(); 4343 return std::make_pair(CNC, CNC); 4344 } 4345 4346 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth(); 4347 const APInt &L = LC->getValue()->getValue(); 4348 const APInt &M = MC->getValue()->getValue(); 4349 const APInt &N = NC->getValue()->getValue(); 4350 APInt Two(BitWidth, 2); 4351 APInt Four(BitWidth, 4); 4352 4353 { 4354 using namespace APIntOps; 4355 const APInt& C = L; 4356 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C 4357 // The B coefficient is M-N/2 4358 APInt B(M); 4359 B -= sdiv(N,Two); 4360 4361 // The A coefficient is N/2 4362 APInt A(N.sdiv(Two)); 4363 4364 // Compute the B^2-4ac term. 
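// (i.e. the discriminant of the quadratic A*X^2 + B*X + C).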
4365 APInt SqrtTerm(B); 4366 SqrtTerm *= B; 4367 SqrtTerm -= Four * (A * C); 4368 4369 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest 4370 // integer value or else APInt::sqrt() will assert. 4371 APInt SqrtVal(SqrtTerm.sqrt()); 4372 4373 // Compute the two solutions for the quadratic formula. 4374 // The divisions must be performed as signed divisions. 4375 APInt NegB(-B); 4376 APInt TwoA( A << 1 ); 4377 if (TwoA.isMinValue()) { 4378 const SCEV *CNC = SE.getCouldNotCompute(); 4379 return std::make_pair(CNC, CNC); 4380 } 4381 4382 LLVMContext &Context = SE.getContext(); 4383 4384 ConstantInt *Solution1 = 4385 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA)); 4386 ConstantInt *Solution2 = 4387 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA)); 4388 4389 return std::make_pair(SE.getConstant(Solution1), 4390 SE.getConstant(Solution2)); 4391 } // end APIntOps namespace 4392 } 4393 4394 /// HowFarToZero - Return the number of times a backedge comparing the specified 4395 /// value to zero will execute. If not computable, return CouldNotCompute. 4396 const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { 4397 // If the value is a constant 4398 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4399 // If the value is already zero, the branch will execute zero times. 4400 if (C->getValue()->isZero()) return C; 4401 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4402 } 4403 4404 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); 4405 if (!AddRec || AddRec->getLoop() != L) 4406 return getCouldNotCompute(); 4407 4408 if (AddRec->isAffine()) { 4409 // If this is an affine expression, the execution count of this branch is 4410 // the minimum unsigned root of the following equation: 4411 // 4412 // Start + Step*N = 0 (mod 2^BW) 4413 // 4414 // equivalent to: 4415 // 4416 // Step*N = -Start (mod 2^BW) 4417 // 4418 // where BW is the common bit width of Start and Step. 4419 4420 // Get the initial value for the loop. 4421 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), 4422 L->getParentLoop()); 4423 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), 4424 L->getParentLoop()); 4425 4426 if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) { 4427 // For now we handle only constant steps. 4428 4429 // First, handle unitary steps. 4430 if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so: 4431 return getNegativeSCEV(Start); // N = -Start (as unsigned) 4432 if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so: 4433 return Start; // N = Start (as unsigned) 4434 4435 // Then, try to solve the above equation provided that Start is constant. 4436 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) 4437 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(), 4438 -StartC->getValue()->getValue(), 4439 *this); 4440 } 4441 } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) { 4442 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of 4443 // the quadratic equation to solve it. 4444 std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec, 4445 *this); 4446 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 4447 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 4448 if (R1) { 4449 #if 0 4450 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 4451 << " sol#2: " << *R2 << "\n"; 4452 #endif 4453 // Pick the smallest positive root value. 
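      // (The backedge stops executing at the first iteration at which the
      // chrec evaluates to zero, so of the two roots it is the smaller
      // non-negative one that can be the backedge-taken count.)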
4454 if (ConstantInt *CB = 4455 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 4456 R1->getValue(), R2->getValue()))) { 4457 if (CB->getZExtValue() == false) 4458 std::swap(R1, R2); // R1 is the minimum root now. 4459 4460 // We can only use this value if the chrec ends up with an exact zero 4461 // value at this index. When solving for "X*X != 5", for example, we 4462 // should not accept a root of 2. 4463 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this); 4464 if (Val->isZero()) 4465 return R1; // We found a quadratic root! 4466 } 4467 } 4468 } 4469 4470 return getCouldNotCompute(); 4471 } 4472 4473 /// HowFarToNonZero - Return the number of times a backedge checking the 4474 /// specified value for nonzero will execute. If not computable, return 4475 /// CouldNotCompute 4476 const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) { 4477 // Loops that look like: while (X == 0) are very strange indeed. We don't 4478 // handle them yet except for the trivial case. This could be expanded in the 4479 // future as needed. 4480 4481 // If the value is a constant, check to see if it is known to be non-zero 4482 // already. If so, the backedge will execute zero times. 4483 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { 4484 if (!C->getValue()->isNullValue()) 4485 return getIntegerSCEV(0, C->getType()); 4486 return getCouldNotCompute(); // Otherwise it will loop infinitely. 4487 } 4488 4489 // We could implement others, but I really doubt anyone writes loops like 4490 // this, and if they did, they would already be constant folded. 4491 return getCouldNotCompute(); 4492 } 4493 4494 /// getLoopPredecessor - If the given loop's header has exactly one unique 4495 /// predecessor outside the loop, return it. Otherwise return null. 4496 /// 4497 BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) { 4498 BasicBlock *Header = L->getHeader(); 4499 BasicBlock *Pred = 0; 4500 for (pred_iterator PI = pred_begin(Header), E = pred_end(Header); 4501 PI != E; ++PI) 4502 if (!L->contains(*PI)) { 4503 if (Pred && Pred != *PI) return 0; // Multiple predecessors. 4504 Pred = *PI; 4505 } 4506 return Pred; 4507 } 4508 4509 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB 4510 /// (which may not be an immediate predecessor) which has exactly one 4511 /// successor from which BB is reachable, or null if no such block is 4512 /// found. 4513 /// 4514 BasicBlock * 4515 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { 4516 // If the block has a unique predecessor, then there is no path from the 4517 // predecessor to the block that does not go through the direct edge 4518 // from the predecessor to the block. 4519 if (BasicBlock *Pred = BB->getSinglePredecessor()) 4520 return Pred; 4521 4522 // A loop's header is defined to be a block that dominates the loop. 4523 // If the header has a unique predecessor outside the loop, it must be 4524 // a block that has exactly one successor that can reach the loop. 4525 if (Loop *L = LI->getLoopFor(BB)) 4526 return getLoopPredecessor(L); 4527 4528 return 0; 4529 } 4530 4531 /// HasSameValue - SCEV structural equivalence is usually sufficient for 4532 /// testing whether two expressions are equal, however for the purposes of 4533 /// looking for a condition guarding a loop, it can be useful to be a little 4534 /// more general, since a front-end may have replicated the controlling 4535 /// expression. 
4536 /// 4537 static bool HasSameValue(const SCEV *A, const SCEV *B) { 4538 // Quick check to see if they are the same SCEV. 4539 if (A == B) return true; 4540 4541 // Otherwise, if they're both SCEVUnknown, it's possible that they hold 4542 // two different instructions with the same value. Check for this case. 4543 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) 4544 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) 4545 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) 4546 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) 4547 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory()) 4548 return true; 4549 4550 // Otherwise assume they may have a different value. 4551 return false; 4552 } 4553 4554 bool ScalarEvolution::isKnownNegative(const SCEV *S) { 4555 return getSignedRange(S).getSignedMax().isNegative(); 4556 } 4557 4558 bool ScalarEvolution::isKnownPositive(const SCEV *S) { 4559 return getSignedRange(S).getSignedMin().isStrictlyPositive(); 4560 } 4561 4562 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) { 4563 return !getSignedRange(S).getSignedMin().isNegative(); 4564 } 4565 4566 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) { 4567 return !getSignedRange(S).getSignedMax().isStrictlyPositive(); 4568 } 4569 4570 bool ScalarEvolution::isKnownNonZero(const SCEV *S) { 4571 return isKnownNegative(S) || isKnownPositive(S); 4572 } 4573 4574 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred, 4575 const SCEV *LHS, const SCEV *RHS) { 4576 4577 if (HasSameValue(LHS, RHS)) 4578 return ICmpInst::isTrueWhenEqual(Pred); 4579 4580 switch (Pred) { 4581 default: 4582 llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 4583 break; 4584 case ICmpInst::ICMP_SGT: 4585 Pred = ICmpInst::ICMP_SLT; 4586 std::swap(LHS, RHS); 4587 case ICmpInst::ICMP_SLT: { 4588 ConstantRange LHSRange = getSignedRange(LHS); 4589 ConstantRange RHSRange = getSignedRange(RHS); 4590 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin())) 4591 return true; 4592 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax())) 4593 return false; 4594 break; 4595 } 4596 case ICmpInst::ICMP_SGE: 4597 Pred = ICmpInst::ICMP_SLE; 4598 std::swap(LHS, RHS); 4599 case ICmpInst::ICMP_SLE: { 4600 ConstantRange LHSRange = getSignedRange(LHS); 4601 ConstantRange RHSRange = getSignedRange(RHS); 4602 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin())) 4603 return true; 4604 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax())) 4605 return false; 4606 break; 4607 } 4608 case ICmpInst::ICMP_UGT: 4609 Pred = ICmpInst::ICMP_ULT; 4610 std::swap(LHS, RHS); 4611 case ICmpInst::ICMP_ULT: { 4612 ConstantRange LHSRange = getUnsignedRange(LHS); 4613 ConstantRange RHSRange = getUnsignedRange(RHS); 4614 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin())) 4615 return true; 4616 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax())) 4617 return false; 4618 break; 4619 } 4620 case ICmpInst::ICMP_UGE: 4621 Pred = ICmpInst::ICMP_ULE; 4622 std::swap(LHS, RHS); 4623 case ICmpInst::ICMP_ULE: { 4624 ConstantRange LHSRange = getUnsignedRange(LHS); 4625 ConstantRange RHSRange = getUnsignedRange(RHS); 4626 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin())) 4627 return true; 4628 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax())) 4629 return false; 4630 break; 4631 } 4632 case ICmpInst::ICMP_NE: { 4633 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet()) 4634 return true; 4635 if 
(getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
      return true;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    if (isKnownNonZero(Diff))
      return true;
    break;
  }
  case ICmpInst::ICMP_EQ:
    // The check at the top of the function catches the case where
    // the values are known to be equal.
    break;
  }
  return false;
}

/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS. This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LoopContinuePredicate ||
      LoopContinuePredicate->isUnconditional())
    return false;

  return isImpliedCond(LoopContinuePredicate->getCondition(), Pred, LHS, RHS,
                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}

/// isLoopGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopGuardedByCond(const Loop *L,
                                     ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  BasicBlock *Predecessor = getLoopPredecessor(L);
  BasicBlock *PredecessorDest = L->getHeader();

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (; Predecessor;
       PredecessorDest = Predecessor,
       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Predecessor->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
                      LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
      return true;
  }

  return false;
}

/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(Value *CondValue,
                                    ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    bool Inverse) {
  // Recursively handle And and Or conditions.
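  // (On the taken edge of a branch on (A && B), both A and B are known to
  // hold, so it suffices for either operand to imply the desired predicate;
  // dually, on the not-taken edge of a branch on (A || B), both operands are
  // known to be false.)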
4718 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) { 4719 if (BO->getOpcode() == Instruction::And) { 4720 if (!Inverse) 4721 return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) || 4722 isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse); 4723 } else if (BO->getOpcode() == Instruction::Or) { 4724 if (Inverse) 4725 return isImpliedCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) || 4726 isImpliedCond(BO->getOperand(1), Pred, LHS, RHS, Inverse); 4727 } 4728 } 4729 4730 ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue); 4731 if (!ICI) return false; 4732 4733 // Bail if the ICmp's operands' types are wider than the needed type 4734 // before attempting to call getSCEV on them. This avoids infinite 4735 // recursion, since the analysis of widening casts can require loop 4736 // exit condition information for overflow checking, which would 4737 // lead back here. 4738 if (getTypeSizeInBits(LHS->getType()) < 4739 getTypeSizeInBits(ICI->getOperand(0)->getType())) 4740 return false; 4741 4742 // Now that we found a conditional branch that dominates the loop, check to 4743 // see if it is the comparison we are looking for. 4744 ICmpInst::Predicate FoundPred; 4745 if (Inverse) 4746 FoundPred = ICI->getInversePredicate(); 4747 else 4748 FoundPred = ICI->getPredicate(); 4749 4750 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0)); 4751 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1)); 4752 4753 // Balance the types. The case where FoundLHS' type is wider than 4754 // LHS' type is checked for above. 4755 if (getTypeSizeInBits(LHS->getType()) > 4756 getTypeSizeInBits(FoundLHS->getType())) { 4757 if (CmpInst::isSigned(Pred)) { 4758 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType()); 4759 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType()); 4760 } else { 4761 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType()); 4762 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType()); 4763 } 4764 } 4765 4766 // Canonicalize the query to match the way instcombine will have 4767 // canonicalized the comparison. 4768 // First, put a constant operand on the right. 4769 if (isa<SCEVConstant>(LHS)) { 4770 std::swap(LHS, RHS); 4771 Pred = ICmpInst::getSwappedPredicate(Pred); 4772 } 4773 // Then, canonicalize comparisons with boundary cases. 
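  // (For example, a query of the form "X u>= 1" is rewritten as "X != 0",
  // "X u<= UINT_MAX-1" as "X != UINT_MAX", and queries that are trivially
  // true or false for the given constant, such as "X u>= 0", are answered
  // immediately.)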
4774 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { 4775 const APInt &RA = RC->getValue()->getValue(); 4776 switch (Pred) { 4777 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!"); 4778 case ICmpInst::ICMP_EQ: 4779 case ICmpInst::ICMP_NE: 4780 break; 4781 case ICmpInst::ICMP_UGE: 4782 if ((RA - 1).isMinValue()) { 4783 Pred = ICmpInst::ICMP_NE; 4784 RHS = getConstant(RA - 1); 4785 break; 4786 } 4787 if (RA.isMaxValue()) { 4788 Pred = ICmpInst::ICMP_EQ; 4789 break; 4790 } 4791 if (RA.isMinValue()) return true; 4792 break; 4793 case ICmpInst::ICMP_ULE: 4794 if ((RA + 1).isMaxValue()) { 4795 Pred = ICmpInst::ICMP_NE; 4796 RHS = getConstant(RA + 1); 4797 break; 4798 } 4799 if (RA.isMinValue()) { 4800 Pred = ICmpInst::ICMP_EQ; 4801 break; 4802 } 4803 if (RA.isMaxValue()) return true; 4804 break; 4805 case ICmpInst::ICMP_SGE: 4806 if ((RA - 1).isMinSignedValue()) { 4807 Pred = ICmpInst::ICMP_NE; 4808 RHS = getConstant(RA - 1); 4809 break; 4810 } 4811 if (RA.isMaxSignedValue()) { 4812 Pred = ICmpInst::ICMP_EQ; 4813 break; 4814 } 4815 if (RA.isMinSignedValue()) return true; 4816 break; 4817 case ICmpInst::ICMP_SLE: 4818 if ((RA + 1).isMaxSignedValue()) { 4819 Pred = ICmpInst::ICMP_NE; 4820 RHS = getConstant(RA + 1); 4821 break; 4822 } 4823 if (RA.isMinSignedValue()) { 4824 Pred = ICmpInst::ICMP_EQ; 4825 break; 4826 } 4827 if (RA.isMaxSignedValue()) return true; 4828 break; 4829 case ICmpInst::ICMP_UGT: 4830 if (RA.isMinValue()) { 4831 Pred = ICmpInst::ICMP_NE; 4832 break; 4833 } 4834 if ((RA + 1).isMaxValue()) { 4835 Pred = ICmpInst::ICMP_EQ; 4836 RHS = getConstant(RA + 1); 4837 break; 4838 } 4839 if (RA.isMaxValue()) return false; 4840 break; 4841 case ICmpInst::ICMP_ULT: 4842 if (RA.isMaxValue()) { 4843 Pred = ICmpInst::ICMP_NE; 4844 break; 4845 } 4846 if ((RA - 1).isMinValue()) { 4847 Pred = ICmpInst::ICMP_EQ; 4848 RHS = getConstant(RA - 1); 4849 break; 4850 } 4851 if (RA.isMinValue()) return false; 4852 break; 4853 case ICmpInst::ICMP_SGT: 4854 if (RA.isMinSignedValue()) { 4855 Pred = ICmpInst::ICMP_NE; 4856 break; 4857 } 4858 if ((RA + 1).isMaxSignedValue()) { 4859 Pred = ICmpInst::ICMP_EQ; 4860 RHS = getConstant(RA + 1); 4861 break; 4862 } 4863 if (RA.isMaxSignedValue()) return false; 4864 break; 4865 case ICmpInst::ICMP_SLT: 4866 if (RA.isMaxSignedValue()) { 4867 Pred = ICmpInst::ICMP_NE; 4868 break; 4869 } 4870 if ((RA - 1).isMinSignedValue()) { 4871 Pred = ICmpInst::ICMP_EQ; 4872 RHS = getConstant(RA - 1); 4873 break; 4874 } 4875 if (RA.isMinSignedValue()) return false; 4876 break; 4877 } 4878 } 4879 4880 // Check to see if we can make the LHS or RHS match. 4881 if (LHS == FoundRHS || RHS == FoundLHS) { 4882 if (isa<SCEVConstant>(RHS)) { 4883 std::swap(FoundLHS, FoundRHS); 4884 FoundPred = ICmpInst::getSwappedPredicate(FoundPred); 4885 } else { 4886 std::swap(LHS, RHS); 4887 Pred = ICmpInst::getSwappedPredicate(Pred); 4888 } 4889 } 4890 4891 // Check whether the found predicate is the same as the desired predicate. 4892 if (FoundPred == Pred) 4893 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS); 4894 4895 // Check whether swapping the found predicate makes it the same as the 4896 // desired predicate. 
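  // (For example, the guard may have established "B u> A" while the query
  // asks about "A u< B".)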
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}

/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}

/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
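///
/// In other words, assuming a non-negative Step, the value computed is
/// (End - Start + (Step - 1)) /u Step, so that the division rounds up;
/// e.g. (10 - 0 + (3 - 1)) /u 3 = 4.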
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step,
                                        bool NoWrap) {
  assert(!isKnownNegative(Step) &&
         "This code doesn't handle negative strides yet!");

  const Type *Ty = Start->getType();
  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  if (!NoWrap) {
    // Check Add for unsigned overflow.
    // TODO: More sophisticated things could be done here.
    const Type *WideTy = IntegerType::get(getContext(),
                                          getTypeSizeInBits(Ty) + 1);
    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
      return getCouldNotCompute();
  }

  return getUDivExpr(Add, Step);
}

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute. If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle: "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
                           AddRec->hasNoUnsignedWrap();

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
      const SCEV *One = getIntegerSCEV(1, Step->getType());
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
    // m. So, we count the number of iterations in which {n,+,s} < m is true.
    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
    // treat m-n as signed nor unsigned due to overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT :
                                        ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same effect.
    // This allows the subsequent ceiling division of (N+(step-1))/step to
    // compute the correct value.
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getIntegerSCEV(1, Step->getType()));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}

/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range. Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count. If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
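  // (For example, asking when {5,+,3} leaves the range [5,25) is the same as
  // asking when {0,+,3} leaves the range [0,20).)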
5130 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart())) 5131 if (!SC->getValue()->isZero()) { 5132 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end()); 5133 Operands[0] = SE.getIntegerSCEV(0, SC->getType()); 5134 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop()); 5135 if (const SCEVAddRecExpr *ShiftedAddRec = 5136 dyn_cast<SCEVAddRecExpr>(Shifted)) 5137 return ShiftedAddRec->getNumIterationsInRange( 5138 Range.subtract(SC->getValue()->getValue()), SE); 5139 // This is strange and shouldn't happen. 5140 return SE.getCouldNotCompute(); 5141 } 5142 5143 // The only time we can solve this is when we have all constant indices. 5144 // Otherwise, we cannot determine the overflow conditions. 5145 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) 5146 if (!isa<SCEVConstant>(getOperand(i))) 5147 return SE.getCouldNotCompute(); 5148 5149 5150 // Okay at this point we know that all elements of the chrec are constants and 5151 // that the start element is zero. 5152 5153 // First check to see if the range contains zero. If not, the first 5154 // iteration exits. 5155 unsigned BitWidth = SE.getTypeSizeInBits(getType()); 5156 if (!Range.contains(APInt(BitWidth, 0))) 5157 return SE.getIntegerSCEV(0, getType()); 5158 5159 if (isAffine()) { 5160 // If this is an affine expression then we have this situation: 5161 // Solve {0,+,A} in Range === Ax in Range 5162 5163 // We know that zero is in the range. If A is positive then we know that 5164 // the upper value of the range must be the first possible exit value. 5165 // If A is negative then the lower of the range is the last possible loop 5166 // value. Also note that we already checked for a full range. 5167 APInt One(BitWidth,1); 5168 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue(); 5169 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower(); 5170 5171 // The exit value should be (End+A)/A. 5172 APInt ExitVal = (End + A).udiv(A); 5173 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal); 5174 5175 // Evaluate at the exit value. If we really did fall out of the valid 5176 // range, then we computed our trip count, otherwise wrap around or other 5177 // things must have happened. 5178 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE); 5179 if (Range.contains(Val->getValue())) 5180 return SE.getCouldNotCompute(); // Something strange happened 5181 5182 // Ensure that the previous value is in the range. This is a sanity check. 5183 assert(Range.contains( 5184 EvaluateConstantChrecAtConstant(this, 5185 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) && 5186 "Linear scev computation is off in a bad way!"); 5187 return SE.getConstant(ExitValue); 5188 } else if (isQuadratic()) { 5189 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the 5190 // quadratic equation to solve it. To do this, we must frame our problem in 5191 // terms of figuring out when zero is crossed, instead of when 5192 // Range.getUpper() is crossed. 
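    // (Replacing the start value with -Range.getUpper() below gives a chrec
    // that evaluates to zero exactly at the iteration where the original
    // chrec reaches Range.getUpper().)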
5193 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end()); 5194 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper())); 5195 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop()); 5196 5197 // Next, solve the constructed addrec 5198 std::pair<const SCEV *,const SCEV *> Roots = 5199 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE); 5200 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first); 5201 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); 5202 if (R1) { 5203 // Pick the smallest positive root value. 5204 if (ConstantInt *CB = 5205 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT, 5206 R1->getValue(), R2->getValue()))) { 5207 if (CB->getZExtValue() == false) 5208 std::swap(R1, R2); // R1 is the minimum root now. 5209 5210 // Make sure the root is not off by one. The returned iteration should 5211 // not be in the range, but the previous one should be. When solving 5212 // for "X*X < 5", for example, we should not return a root of 2. 5213 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this, 5214 R1->getValue(), 5215 SE); 5216 if (Range.contains(R1Val->getValue())) { 5217 // The next iteration must be out of the range... 5218 ConstantInt *NextVal = 5219 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1); 5220 5221 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5222 if (!Range.contains(R1Val->getValue())) 5223 return SE.getConstant(NextVal); 5224 return SE.getCouldNotCompute(); // Something strange happened 5225 } 5226 5227 // If R1 was not in the range, then it is a good return value. Make 5228 // sure that R1-1 WAS in the range though, just in case. 5229 ConstantInt *NextVal = 5230 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1); 5231 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE); 5232 if (Range.contains(R1Val->getValue())) 5233 return R1; 5234 return SE.getCouldNotCompute(); // Something strange happened 5235 } 5236 } 5237 } 5238 5239 return SE.getCouldNotCompute(); 5240 } 5241 5242 5243 5244 //===----------------------------------------------------------------------===// 5245 // SCEVCallbackVH Class Implementation 5246 //===----------------------------------------------------------------------===// 5247 5248 void ScalarEvolution::SCEVCallbackVH::deleted() { 5249 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5250 if (PHINode *PN = dyn_cast<PHINode>(getValPtr())) 5251 SE->ConstantEvolutionLoopExitValue.erase(PN); 5252 SE->Scalars.erase(getValPtr()); 5253 // this now dangles! 5254 } 5255 5256 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) { 5257 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!"); 5258 5259 // Forget all the expressions associated with users of the old value, 5260 // so that future queries will recompute the expressions using the new 5261 // value. 5262 SmallVector<User *, 16> Worklist; 5263 SmallPtrSet<User *, 8> Visited; 5264 Value *Old = getValPtr(); 5265 bool DeleteOld = false; 5266 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end(); 5267 UI != UE; ++UI) 5268 Worklist.push_back(*UI); 5269 while (!Worklist.empty()) { 5270 User *U = Worklist.pop_back_val(); 5271 // Deleting the Old value will cause this to dangle. Postpone 5272 // that until everything else is done. 
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value if it (indirectly) references itself.
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
  // this may dangle!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
5376 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); 5377 5378 OS << "Classifying expressions for: "; 5379 WriteAsOperand(OS, F, /*PrintType=*/false); 5380 OS << "\n"; 5381 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) 5382 if (isSCEVable(I->getType())) { 5383 OS << *I << '\n'; 5384 OS << " --> "; 5385 const SCEV *SV = SE.getSCEV(&*I); 5386 SV->print(OS); 5387 5388 const Loop *L = LI->getLoopFor((*I).getParent()); 5389 5390 const SCEV *AtUse = SE.getSCEVAtScope(SV, L); 5391 if (AtUse != SV) { 5392 OS << " --> "; 5393 AtUse->print(OS); 5394 } 5395 5396 if (L) { 5397 OS << "\t\t" "Exits: "; 5398 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop()); 5399 if (!ExitValue->isLoopInvariant(L)) { 5400 OS << "<<Unknown>>"; 5401 } else { 5402 OS << *ExitValue; 5403 } 5404 } 5405 5406 OS << "\n"; 5407 } 5408 5409 OS << "Determining loop execution counts for: "; 5410 WriteAsOperand(OS, F, /*PrintType=*/false); 5411 OS << "\n"; 5412 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) 5413 PrintLoopInfo(OS, &SE, *I); 5414 } 5415 5416
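// A rough sketch of how a client pass might drive the analysis implemented
// above (illustrative only; the pass name and the helper function are
// placeholders, not part of this file):
//
//   void MyLoopPass::analyze(Loop *L) {
//     ScalarEvolution &SE = getAnalysis<ScalarEvolution>();
//     // Classify the loop's canonical induction variable, if it has one.
//     if (PHINode *IndVar = L->getCanonicalInductionVariable()) {
//       const SCEV *S = SE.getSCEV(IndVar);
//       (void)S;
//     }
//     // Ask how many times the backedge executes; this may be
//     // SCEVCouldNotCompute if the trip count is unpredictable.
//     const SCEV *BEC = SE.getBackedgeTakenCount(L);
//     if (!isa<SCEVCouldNotCompute>(BEC))
//       dbgs() << "backedge-taken count: " << *BEC << "\n";
//   }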