//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));

using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (= dominating IP) exists, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Value *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users()) {
    if (U->getType() != Ty)
      continue;
    CastInst *CI = dyn_cast<CastInst>(U);
    if (!CI || CI->getOpcode() != Op)
      continue;

    // Found a suitable cast that is at IP or comes before IP. Use it. Note
    // that the cast must also properly dominate the Builder's insertion point.
    if (IP->getParent() == CI->getParent() && &*BIP != CI &&
        (&*IP == CI || CI->comesBefore(&*IP))) {
      Ret = CI;
      break;
    }
  }

  // Create a new cast.
  if (!Ret) {
    SCEVInsertPointGuard Guard(Builder, this);
    Builder.SetInsertPoint(&*IP);
    Ret = Builder.CreateCast(Op, V, Ty, V->getName());
  }

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(!isa<Instruction>(Ret) ||
         SE.DT.dominates(cast<Instruction>(Ret), &*BIP));

  return Ret;
}

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getParent()->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  // Adjust insert point to be after instructions inserted by the expander, so
  // we can re-use already inserted instructions. Avoid skipping past the
  // original \p MustDominate, in case it is an inserted instruction.
  while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
    ++IP;

  return IP;
}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return IP;
  }

  // Cast the instruction immediately after the instruction.
  if (Instruction *I = dyn_cast<Instruction>(V))
    return findInsertPointAfter(I, &*Builder.GetInsertPoint());

  // Otherwise, this must be some kind of a constant,
  // so let's plop this cast into the function's entry block.
  assert(isa<Constant>(V) &&
         "Expected the cast argument to be a global/constant");
  return Builder.GetInsertBlock()
      ->getParent()
      ->getEntryBlock()
      .getFirstInsertionPt();
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // inttoptr only works for integral pointers. For non-integral pointers, we
  // can create a GEP on i8* null with the integral value as index. Note that
  // it is safe to use GEP of null instead of inttoptr here, because only
  // expressions already based on a GEP of null should be converted to pointers
  // during expansion.
  if (Op == Instruction::IntToPtr) {
    auto *PtrTy = cast<PointerType>(Ty);
    if (DL.isNonIntegralPointerType(PtrTy)) {
      auto *Int8PtrTy = Builder.getInt8PtrTy(PtrTy->getAddressSpace());
      assert(DL.getTypeAllocSize(Int8PtrTy->getElementType()) == 1 &&
             "alloc size of i8 must be 1 byte for the GEP to be correct");
      auto *GEP = Builder.CreateGEP(
          Builder.getInt8Ty(), Constant::getNullValue(Int8PtrTy), V, "uglygep");
      return Builder.CreateBitCast(GEP, Ty);
    }
  }
  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
              SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
              SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Try to reuse existing cast, or insert one.
  return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not reuse any instruction which has the exact
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor))
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getAPInt().srem(FC->getAPInt())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->operands());
          NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntIdxTy = DL.getIndexType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntIdxTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled =
        ScaledOps.empty()
            ? Constant::getNullValue(Ty)
            : expandCodeForImpl(SE.getAddExpr(ScaledOps), Ty, false);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      // FIXME: Handle VectorType.
      // E.g., if ElTy is a scalable vector, then ElSize is not a compile-time
      // constant and therefore cannot be factored out. The generated IR is
      // less ideal, with the base 'V' cast to i8* and an ugly getelementptr
      // done over that.
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeForImpl(SE.getAddExpr(Ops), Ty, false);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    return Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeForImpl(SE.getNegativeSCEV(Op), Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeForImpl(Op, Ty, false);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc. and include those
    // that are needed in the result.
    Value *P = expandCodeForImpl(I->second, Ty, false);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear nsw flag if shl will produce poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeForImpl(S->getLHS(), Ty, false);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeForImpl(S->getRHS(), Ty, false);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->operands());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (Use &Op : llvm::drop_begin(IncV->operands()))
      if (Instruction *OInst = dyn_cast<Instruction>(Op))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Use &U : llvm::drop_begin(IncV->operands())) {
      if (isa<Constant>(U))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(U)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // Allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType())
      IncV = Builder.CreateBitCast(IncV, PN->getType());
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
  }
  return IncV;
}

/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

/// Check whether the increment of AR is known not to overflow in the signed
/// sense: adding the step and then sign-extending into a wider type must give
/// the same result as sign-extending the operands first and then adding.
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// Check whether the increment of AR is known not to overflow in the unsigned
/// sense, using the same extend-then-add vs. add-then-extend comparison with
/// zero-extension.
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      // We should not look for an incomplete PHI. Getting SCEV for an
      // incomplete PHI has no meaning at all.
      if (!PN.isComplete()) {
        SCEV_DEBUG_WITH_TYPE(
            DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
        continue;
      }

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      // Those values were not actually inserted but re-used.
      ReusedValues.insert(AddRecPhiMatch);
      ReusedValues.insert(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV =
      expandCodeForImpl(Normalized->getStart(), ExpandTy,
                        L->getLoopPreheader()->getTerminator(), false);

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeForImpl(
      Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // We might be introducing a new use of the post-inc IV that is not poison
    // safe, in which case we should drop poison generating flags. Only keep
    // those flags for which SCEV has proven that they always hold.
    if (isa<OverflowingBinaryOperator>(Result)) {
      auto *I = cast<Instruction>(Result);
      if (!S->hasNoUnsignedWrap())
        I->setHasNoUnsignedWrap(false);
      if (!S->hasNoSignedWrap())
        I->setHasNoSignedWrap(false);
    }

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeForImpl(
            Step, IntTy, &*L->getHeader()->getFirstInsertionPt(), false);
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
1508   if (TruncTy) {
1509     Type *ResTy = Result->getType();
1510     // Normalize the result type.
1511     if (ResTy != SE.getEffectiveSCEVType(ResTy))
1512       Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
1513     // Truncate the result.
1514     if (TruncTy != Result->getType())
1515       Result = Builder.CreateTrunc(Result, TruncTy);
1516
1517     // Invert the result.
1518     if (InvertStep)
1519       Result = Builder.CreateSub(
1520           expandCodeForImpl(Normalized->getStart(), TruncTy, false), Result);
1521   }
1522
1523   // Re-apply any non-loop-dominating scale.
1524   if (PostLoopScale) {
1525     assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
1526     Result = InsertNoopCastOfTo(Result, IntTy);
1527     Result = Builder.CreateMul(Result,
1528                                expandCodeForImpl(PostLoopScale, IntTy, false));
1529   }
1530
1531   // Re-apply any non-loop-dominating offset.
1532   if (PostLoopOffset) {
1533     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
1534       if (Result->getType()->isIntegerTy()) {
1535         Value *Base = expandCodeForImpl(PostLoopOffset, ExpandTy, false);
1536         Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
1537       } else {
1538         Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
1539       }
1540     } else {
1541       Result = InsertNoopCastOfTo(Result, IntTy);
1542       Result = Builder.CreateAdd(
1543           Result, expandCodeForImpl(PostLoopOffset, IntTy, false));
1544     }
1545   }
1546
1547   return Result;
1548 }
1549
1550 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1551   // In canonical mode we compute the addrec as an expression of a canonical IV
1552   // using evaluateAtIteration and expand the resulting SCEV expression. This
1553   // way we avoid introducing new IVs to carry on the computation of the addrec
1554   // throughout the loop.
1555   //
1556   // For nested addrecs evaluateAtIteration might need a canonical IV of a
1557   // type wider than the addrec itself. Emitting a canonical IV of the
1558   // proper type might produce non-legal types, for example expanding an i64
1559   // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this, just fall
1560   // back to non-canonical mode for nested addrecs.
1561   if (!CanonicalMode || (S->getNumOperands() > 2))
1562     return expandAddRecExprLiterally(S);
1563
1564   Type *Ty = SE.getEffectiveSCEVType(S->getType());
1565   const Loop *L = S->getLoop();
1566
1567   // First check for an existing canonical IV in a suitable type.
1568   PHINode *CanonicalIV = nullptr;
1569   if (PHINode *PN = L->getCanonicalInductionVariable())
1570     if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1571       CanonicalIV = PN;
1572
1573   // Rewrite an AddRec in terms of the canonical induction variable, if
1574   // its type is narrower.
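  // For example, an i32 addrec in a loop that already has an i64 canonical IV
  // is rewritten as an i64 addrec with any-extended operands, expanded, and
  // then truncated back to i32 just after the expanded value.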
1575 if (CanonicalIV && 1576 SE.getTypeSizeInBits(CanonicalIV->getType()) > 1577 SE.getTypeSizeInBits(Ty)) { 1578 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands()); 1579 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i) 1580 NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType()); 1581 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(), 1582 S->getNoWrapFlags(SCEV::FlagNW))); 1583 BasicBlock::iterator NewInsertPt = 1584 findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint()); 1585 V = expandCodeForImpl(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr, 1586 &*NewInsertPt, false); 1587 return V; 1588 } 1589 1590 // {X,+,F} --> X + {0,+,F} 1591 if (!S->getStart()->isZero()) { 1592 SmallVector<const SCEV *, 4> NewOps(S->operands()); 1593 NewOps[0] = SE.getConstant(Ty, 0); 1594 const SCEV *Rest = SE.getAddRecExpr(NewOps, L, 1595 S->getNoWrapFlags(SCEV::FlagNW)); 1596 1597 // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the 1598 // comments on expandAddToGEP for details. 1599 const SCEV *Base = S->getStart(); 1600 // Dig into the expression to find the pointer base for a GEP. 1601 const SCEV *ExposedRest = Rest; 1602 ExposePointerBase(Base, ExposedRest, SE); 1603 // If we found a pointer, expand the AddRec with a GEP. 1604 if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) { 1605 // Make sure the Base isn't something exotic, such as a multiplied 1606 // or divided pointer value. In those cases, the result type isn't 1607 // actually a pointer type. 1608 if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) { 1609 Value *StartV = expand(Base); 1610 assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!"); 1611 return expandAddToGEP(ExposedRest, PTy, Ty, StartV); 1612 } 1613 } 1614 1615 // Just do a normal add. Pre-expand the operands to suppress folding. 1616 // 1617 // The LHS and RHS values are factored out of the expand call to make the 1618 // output independent of the argument evaluation order. 1619 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart())); 1620 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest)); 1621 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS)); 1622 } 1623 1624 // If we don't yet have a canonical IV, create one. 1625 if (!CanonicalIV) { 1626 // Create and insert the PHI node for the induction variable in the 1627 // specified loop. 1628 BasicBlock *Header = L->getHeader(); 1629 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header); 1630 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar", 1631 &Header->front()); 1632 rememberInstruction(CanonicalIV); 1633 1634 SmallSet<BasicBlock *, 4> PredSeen; 1635 Constant *One = ConstantInt::get(Ty, 1); 1636 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) { 1637 BasicBlock *HP = *HPI; 1638 if (!PredSeen.insert(HP).second) { 1639 // There must be an incoming value for each predecessor, even the 1640 // duplicates! 1641 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP); 1642 continue; 1643 } 1644 1645 if (L->contains(HP)) { 1646 // Insert a unit add instruction right before the terminator 1647 // corresponding to the back-edge. 
1648 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One, 1649 "indvar.next", 1650 HP->getTerminator()); 1651 Add->setDebugLoc(HP->getTerminator()->getDebugLoc()); 1652 rememberInstruction(Add); 1653 CanonicalIV->addIncoming(Add, HP); 1654 } else { 1655 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP); 1656 } 1657 } 1658 } 1659 1660 // {0,+,1} --> Insert a canonical induction variable into the loop! 1661 if (S->isAffine() && S->getOperand(1)->isOne()) { 1662 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) && 1663 "IVs with types different from the canonical IV should " 1664 "already have been handled!"); 1665 return CanonicalIV; 1666 } 1667 1668 // {0,+,F} --> {0,+,1} * F 1669 1670 // If this is a simple linear addrec, emit it now as a special case. 1671 if (S->isAffine()) // {0,+,F} --> i*F 1672 return 1673 expand(SE.getTruncateOrNoop( 1674 SE.getMulExpr(SE.getUnknown(CanonicalIV), 1675 SE.getNoopOrAnyExtend(S->getOperand(1), 1676 CanonicalIV->getType())), 1677 Ty)); 1678 1679 // If this is a chain of recurrences, turn it into a closed form, using the 1680 // folders, then expandCodeFor the closed form. This allows the folders to 1681 // simplify the expression without having to build a bunch of special code 1682 // into this folder. 1683 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV. 1684 1685 // Promote S up to the canonical IV type, if the cast is foldable. 1686 const SCEV *NewS = S; 1687 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType()); 1688 if (isa<SCEVAddRecExpr>(Ext)) 1689 NewS = Ext; 1690 1691 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE); 1692 //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n"; 1693 1694 // Truncate the result down to the original type, if needed. 1695 const SCEV *T = SE.getTruncateOrNoop(V, Ty); 1696 return expand(T); 1697 } 1698 1699 Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) { 1700 Value *V = 1701 expandCodeForImpl(S->getOperand(), S->getOperand()->getType(), false); 1702 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt, 1703 GetOptimalInsertionPointForCastOf(V)); 1704 } 1705 1706 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) { 1707 Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1708 Value *V = expandCodeForImpl( 1709 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()), 1710 false); 1711 return Builder.CreateTrunc(V, Ty); 1712 } 1713 1714 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) { 1715 Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1716 Value *V = expandCodeForImpl( 1717 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()), 1718 false); 1719 return Builder.CreateZExt(V, Ty); 1720 } 1721 1722 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) { 1723 Type *Ty = SE.getEffectiveSCEVType(S->getType()); 1724 Value *V = expandCodeForImpl( 1725 S->getOperand(), SE.getEffectiveSCEVType(S->getOperand()->getType()), 1726 false); 1727 return Builder.CreateSExt(V, Ty); 1728 } 1729 1730 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) { 1731 Value *LHS = expand(S->getOperand(S->getNumOperands()-1)); 1732 Type *Ty = LHS->getType(); 1733 for (int i = S->getNumOperands()-2; i >= 0; --i) { 1734 // In the case of mixed integer and pointer types, do the 1735 // rest of the comparisons as integer. 
1736 Type *OpTy = S->getOperand(i)->getType(); 1737 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) { 1738 Ty = SE.getEffectiveSCEVType(Ty); 1739 LHS = InsertNoopCastOfTo(LHS, Ty); 1740 } 1741 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false); 1742 Value *Sel; 1743 if (Ty->isIntegerTy()) 1744 Sel = Builder.CreateIntrinsic(Intrinsic::smax, {Ty}, {LHS, RHS}, 1745 /*FMFSource=*/nullptr, "smax"); 1746 else { 1747 Value *ICmp = Builder.CreateICmpSGT(LHS, RHS); 1748 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax"); 1749 } 1750 LHS = Sel; 1751 } 1752 // In the case of mixed integer and pointer types, cast the 1753 // final result back to the pointer type. 1754 if (LHS->getType() != S->getType()) 1755 LHS = InsertNoopCastOfTo(LHS, S->getType()); 1756 return LHS; 1757 } 1758 1759 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) { 1760 Value *LHS = expand(S->getOperand(S->getNumOperands()-1)); 1761 Type *Ty = LHS->getType(); 1762 for (int i = S->getNumOperands()-2; i >= 0; --i) { 1763 // In the case of mixed integer and pointer types, do the 1764 // rest of the comparisons as integer. 1765 Type *OpTy = S->getOperand(i)->getType(); 1766 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) { 1767 Ty = SE.getEffectiveSCEVType(Ty); 1768 LHS = InsertNoopCastOfTo(LHS, Ty); 1769 } 1770 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false); 1771 Value *Sel; 1772 if (Ty->isIntegerTy()) 1773 Sel = Builder.CreateIntrinsic(Intrinsic::umax, {Ty}, {LHS, RHS}, 1774 /*FMFSource=*/nullptr, "umax"); 1775 else { 1776 Value *ICmp = Builder.CreateICmpUGT(LHS, RHS); 1777 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax"); 1778 } 1779 LHS = Sel; 1780 } 1781 // In the case of mixed integer and pointer types, cast the 1782 // final result back to the pointer type. 1783 if (LHS->getType() != S->getType()) 1784 LHS = InsertNoopCastOfTo(LHS, S->getType()); 1785 return LHS; 1786 } 1787 1788 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) { 1789 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1)); 1790 Type *Ty = LHS->getType(); 1791 for (int i = S->getNumOperands() - 2; i >= 0; --i) { 1792 // In the case of mixed integer and pointer types, do the 1793 // rest of the comparisons as integer. 1794 Type *OpTy = S->getOperand(i)->getType(); 1795 if (OpTy->isIntegerTy() != Ty->isIntegerTy()) { 1796 Ty = SE.getEffectiveSCEVType(Ty); 1797 LHS = InsertNoopCastOfTo(LHS, Ty); 1798 } 1799 Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false); 1800 Value *Sel; 1801 if (Ty->isIntegerTy()) 1802 Sel = Builder.CreateIntrinsic(Intrinsic::smin, {Ty}, {LHS, RHS}, 1803 /*FMFSource=*/nullptr, "smin"); 1804 else { 1805 Value *ICmp = Builder.CreateICmpSLT(LHS, RHS); 1806 Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin"); 1807 } 1808 LHS = Sel; 1809 } 1810 // In the case of mixed integer and pointer types, cast the 1811 // final result back to the pointer type. 1812 if (LHS->getType() != S->getType()) 1813 LHS = InsertNoopCastOfTo(LHS, S->getType()); 1814 return LHS; 1815 } 1816 1817 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) { 1818 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1)); 1819 Type *Ty = LHS->getType(); 1820 for (int i = S->getNumOperands() - 2; i >= 0; --i) { 1821 // In the case of mixed integer and pointer types, do the 1822 // rest of the comparisons as integer. 
1823     Type *OpTy = S->getOperand(i)->getType();
1824     if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1825       Ty = SE.getEffectiveSCEVType(Ty);
1826       LHS = InsertNoopCastOfTo(LHS, Ty);
1827     }
1828     Value *RHS = expandCodeForImpl(S->getOperand(i), Ty, false);
1829     Value *Sel;
1830     if (Ty->isIntegerTy())
1831       Sel = Builder.CreateIntrinsic(Intrinsic::umin, {Ty}, {LHS, RHS},
1832                                     /*FMFSource=*/nullptr, "umin");
1833     else {
1834       Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
1835       Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
1836     }
1837     LHS = Sel;
1838   }
1839   // In the case of mixed integer and pointer types, cast the
1840   // final result back to the pointer type.
1841   if (LHS->getType() != S->getType())
1842     LHS = InsertNoopCastOfTo(LHS, S->getType());
1843   return LHS;
1844 }
1845
1846 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty,
1847                                        Instruction *IP, bool Root) {
1848   setInsertPoint(IP);
1849   Value *V = expandCodeForImpl(SH, Ty, Root);
1850   return V;
1851 }
1852
1853 Value *SCEVExpander::expandCodeForImpl(const SCEV *SH, Type *Ty, bool Root) {
1854   // Expand the code for this SCEV.
1855   Value *V = expand(SH);
1856
1857   if (PreserveLCSSA) {
1858     if (auto *Inst = dyn_cast<Instruction>(V)) {
1859       // Create a temporary instruction at the current insertion point, so we
1860       // can hand it off to the helper to create LCSSA PHIs if required for the
1861       // new use.
1862       // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
1863       // would accept an insertion point and return an LCSSA phi for that
1864       // insertion point, so there is no need to insert & remove the temporary
1865       // instruction.
1866       Instruction *Tmp;
1867       if (Inst->getType()->isIntegerTy())
1868         Tmp =
1869             cast<Instruction>(Builder.CreateAdd(Inst, Inst, "tmp.lcssa.user"));
1870       else {
1871         assert(Inst->getType()->isPointerTy());
1872         Tmp = cast<Instruction>(Builder.CreatePtrToInt(
1873             Inst, Type::getInt32Ty(Inst->getContext()), "tmp.lcssa.user"));
1874       }
1875       V = fixupLCSSAFormFor(Tmp, 0);
1876
1877       // Clean up the temporary instruction.
1878       InsertedValues.erase(Tmp);
1879       InsertedPostIncValues.erase(Tmp);
1880       Tmp->eraseFromParent();
1881     }
1882   }
1883
1884   InsertedExpressions[std::make_pair(SH, &*Builder.GetInsertPoint())] = V;
1885   if (Ty) {
1886     assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1887            "non-trivial casts should be done with the SCEVs directly!");
1888     V = InsertNoopCastOfTo(V, Ty);
1889   }
1890   return V;
1891 }
1892
1893 ScalarEvolution::ValueOffsetPair
1894 SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1895                                       const Instruction *InsertPt) {
1896   auto *Set = SE.getSCEVValues(S);
1897   // If the expansion is not in CanonicalMode, and the SCEV contains any
1898   // scAddRecExpr sub-expression, the SCEV must be expanded literally.
1899   if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1900     // If S is scConstant, it may be worse to reuse an existing Value.
1901     if (S->getSCEVType() != scConstant && Set) {
1902       // Choose a Value from the set which dominates the insertPt.
1903       // insertPt should be inside the Value's parent loop so as not to break
1904       // the LCSSA form.
1905       for (auto const &VOPair : *Set) {
1906         Value *V = VOPair.first;
1907         ConstantInt *Offset = VOPair.second;
1908         Instruction *EntInst = nullptr;
1909         if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1910             S->getType() == V->getType() &&
1911             EntInst->getFunction() == InsertPt->getFunction() &&
1912             SE.DT.dominates(EntInst, InsertPt) &&
1913             (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1914              SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1915           return {V, Offset};
1916       }
1917     }
1918   }
1919   return {nullptr, nullptr};
1920 }
1921
1922 // The expansion of a SCEV will either reuse a previous Value in ExprValueMap
1923 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode
1924 // and the SCEV contains any scAddRecExpr sub-expression, it will be expanded
1925 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1926 // the expansion will try to reuse a Value from ExprValueMap, and only when
1927 // that fails, expand the SCEV literally.
1928 Value *SCEVExpander::expand(const SCEV *S) {
1929   // Compute an insertion point for this SCEV object. Hoist the instructions
1930   // as far out in the loop nest as possible.
1931   Instruction *InsertPt = &*Builder.GetInsertPoint();
1932
1933   // We can only move the insertion point if there are no div or rem operations;
1934   // otherwise we risk hoisting it over the check for a zero denominator.
1935   auto SafeToHoist = [](const SCEV *S) {
1936     return !SCEVExprContains(S, [](const SCEV *S) {
1937       if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1938         if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1939           // Division by non-zero constants can be hoisted.
1940           return SC->getValue()->isZero();
1941         // All other divisions should not be moved as they may be
1942         // divisions by zero and should be kept within the
1943         // conditions of the surrounding loops that guard their
1944         // execution (see PR35406).
1945         return true;
1946       }
1947       return false;
1948     });
1949   };
1950   if (SafeToHoist(S)) {
1951     for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1952          L = L->getParentLoop()) {
1953       if (SE.isLoopInvariant(S, L)) {
1954         if (!L) break;
1955         if (BasicBlock *Preheader = L->getLoopPreheader())
1956           InsertPt = Preheader->getTerminator();
1957         else
1958           // LSR sets the insertion point for AddRec start/step values to the
1959           // block start to simplify value reuse, even though it's an invalid
1960           // position. SCEVExpander must correct for this in all cases.
1961           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1962       } else {
1963         // If the SCEV is computable at this level, insert it into the header
1964         // after the PHIs (and after any other instructions that we've inserted
1965         // there) so that it is guaranteed to dominate any user inside the loop.
1966         if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1967           InsertPt = &*L->getHeader()->getFirstInsertionPt();
1968
1969         while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1970                (isInsertedInstruction(InsertPt) ||
1971                 isa<DbgInfoIntrinsic>(InsertPt))) {
1972           InsertPt = &*std::next(InsertPt->getIterator());
1973         }
1974         break;
1975       }
1976     }
1977   }
1978
1979   // Check to see if we already expanded this here.
1980   auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1981   if (I != InsertedExpressions.end())
1982     return I->second;
1983
1984   SCEVInsertPointGuard Guard(Builder, this);
1985   Builder.SetInsertPoint(InsertPt);
1986
1987   // Expand the expression into instructions.
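  // First try to reuse a Value that ScalarEvolution has already recorded for S
  // in its ExprValueMap (adjusting by the recorded constant offset if needed);
  // only when no suitable value is found is the expression visited and
  // materialized from scratch.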
1988   ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1989   Value *V = VO.first;
1990
1991   if (!V)
1992     V = visit(S);
1993   else if (VO.second) {
1994     if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1995       Type *Ety = Vty->getPointerElementType();
1996       int64_t Offset = VO.second->getSExtValue();
1997       int64_t ESize = SE.getTypeSizeInBits(Ety);
1998       if ((Offset * 8) % ESize == 0) {
1999         ConstantInt *Idx =
2000             ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
2001         V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
2002       } else {
2003         ConstantInt *Idx =
2004             ConstantInt::getSigned(VO.second->getType(), -Offset);
2005         unsigned AS = Vty->getAddressSpace();
2006         V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
2007         V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
2008                               "uglygep");
2009         V = Builder.CreateBitCast(V, Vty);
2010       }
2011     } else {
2012       V = Builder.CreateSub(V, VO.second);
2013     }
2014   }
2015   // Remember the expanded value for this SCEV at this location.
2016   //
2017   // This is independent of PostIncLoops. The mapped value simply materializes
2018   // the expression at this insertion point. If the mapped value happened to be
2019   // a postinc expansion, it could be reused by a non-postinc user, but only if
2020   // its insertion point was already at the head of the loop.
2021   InsertedExpressions[std::make_pair(S, InsertPt)] = V;
2022   return V;
2023 }
2024
2025 void SCEVExpander::rememberInstruction(Value *I) {
2026   auto DoInsert = [this](Value *V) {
2027     if (!PostIncLoops.empty())
2028       InsertedPostIncValues.insert(V);
2029     else
2030       InsertedValues.insert(V);
2031   };
2032   DoInsert(I);
2033
2034   if (!PreserveLCSSA)
2035     return;
2036
2037   if (auto *Inst = dyn_cast<Instruction>(I)) {
2038     // A new instruction has been added, which might introduce new uses outside
2039     // a defining loop. Fix LCSSA form for each operand of the new instruction,
2040     // if required.
2041     for (unsigned OpIdx = 0, OpEnd = Inst->getNumOperands(); OpIdx != OpEnd;
2042          OpIdx++)
2043       fixupLCSSAFormFor(Inst, OpIdx);
2044   }
2045 }
2046
2047 /// replaceCongruentIVs - Check for congruent phis in this loop header and
2048 /// replace them with their most canonical representative. Return the number of
2049 /// phis eliminated.
2050 ///
2051 /// This does not depend on any SCEVExpander state but should be used in
2052 /// the same context that SCEVExpander is used.
2053 unsigned
2054 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
2055                                   SmallVectorImpl<WeakTrackingVH> &DeadInsts,
2056                                   const TargetTransformInfo *TTI) {
2057   // Collect the header phis; integer phis are processed in decreasing width order.
2058   SmallVector<PHINode*, 8> Phis;
2059   for (PHINode &PN : L->getHeader()->phis())
2060     Phis.push_back(&PN);
2061
2062   if (TTI)
2063     llvm::sort(Phis, [](Value *LHS, Value *RHS) {
2064       // Put pointers at the back and make sure pointer < pointer = false.
2065       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
2066         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
2067       return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() <
2068              LHS->getType()->getPrimitiveSizeInBits().getFixedSize();
2069     });
2070
2071   unsigned NumElim = 0;
2072   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
2073   // Process phis from wide to narrow. Map wide phis to their truncation
2074   // so narrow phis can reuse them.
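  // For example, given congruent i64 and i32 {0,+,1} phis, and a target where
  // truncating i64 to i32 is free, the i64 phi is kept as the representative
  // and the i32 phi is replaced with a trunc of it.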
2075 for (PHINode *Phi : Phis) { 2076 auto SimplifyPHINode = [&](PHINode *PN) -> Value * { 2077 if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC})) 2078 return V; 2079 if (!SE.isSCEVable(PN->getType())) 2080 return nullptr; 2081 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN)); 2082 if (!Const) 2083 return nullptr; 2084 return Const->getValue(); 2085 }; 2086 2087 // Fold constant phis. They may be congruent to other constant phis and 2088 // would confuse the logic below that expects proper IVs. 2089 if (Value *V = SimplifyPHINode(Phi)) { 2090 if (V->getType() != Phi->getType()) 2091 continue; 2092 Phi->replaceAllUsesWith(V); 2093 DeadInsts.emplace_back(Phi); 2094 ++NumElim; 2095 SCEV_DEBUG_WITH_TYPE(DebugType, 2096 dbgs() << "INDVARS: Eliminated constant iv: " << *Phi 2097 << '\n'); 2098 continue; 2099 } 2100 2101 if (!SE.isSCEVable(Phi->getType())) 2102 continue; 2103 2104 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)]; 2105 if (!OrigPhiRef) { 2106 OrigPhiRef = Phi; 2107 if (Phi->getType()->isIntegerTy() && TTI && 2108 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) { 2109 // This phi can be freely truncated to the narrowest phi type. Map the 2110 // truncated expression to it so it will be reused for narrow types. 2111 const SCEV *TruncExpr = 2112 SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType()); 2113 ExprToIVMap[TruncExpr] = Phi; 2114 } 2115 continue; 2116 } 2117 2118 // Replacing a pointer phi with an integer phi or vice-versa doesn't make 2119 // sense. 2120 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy()) 2121 continue; 2122 2123 if (BasicBlock *LatchBlock = L->getLoopLatch()) { 2124 Instruction *OrigInc = dyn_cast<Instruction>( 2125 OrigPhiRef->getIncomingValueForBlock(LatchBlock)); 2126 Instruction *IsomorphicInc = 2127 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock)); 2128 2129 if (OrigInc && IsomorphicInc) { 2130 // If this phi has the same width but is more canonical, replace the 2131 // original with it. As part of the "more canonical" determination, 2132 // respect a prior decision to use an IV chain. 2133 if (OrigPhiRef->getType() == Phi->getType() && 2134 !(ChainedPhis.count(Phi) || 2135 isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) && 2136 (ChainedPhis.count(Phi) || 2137 isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) { 2138 std::swap(OrigPhiRef, Phi); 2139 std::swap(OrigInc, IsomorphicInc); 2140 } 2141 // Replacing the congruent phi is sufficient because acyclic 2142 // redundancy elimination, CSE/GVN, should handle the 2143 // rest. However, once SCEV proves that a phi is congruent, 2144 // it's often the head of an IV user cycle that is isomorphic 2145 // with the original phi. It's worth eagerly cleaning up the 2146 // common case of a single IV increment so that DeleteDeadPHIs 2147 // can remove cycles that had postinc uses. 
2148 const SCEV *TruncExpr = 2149 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType()); 2150 if (OrigInc != IsomorphicInc && 2151 TruncExpr == SE.getSCEV(IsomorphicInc) && 2152 SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) && 2153 hoistIVInc(OrigInc, IsomorphicInc)) { 2154 SCEV_DEBUG_WITH_TYPE( 2155 DebugType, dbgs() << "INDVARS: Eliminated congruent iv.inc: " 2156 << *IsomorphicInc << '\n'); 2157 Value *NewInc = OrigInc; 2158 if (OrigInc->getType() != IsomorphicInc->getType()) { 2159 Instruction *IP = nullptr; 2160 if (PHINode *PN = dyn_cast<PHINode>(OrigInc)) 2161 IP = &*PN->getParent()->getFirstInsertionPt(); 2162 else 2163 IP = OrigInc->getNextNode(); 2164 2165 IRBuilder<> Builder(IP); 2166 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc()); 2167 NewInc = Builder.CreateTruncOrBitCast( 2168 OrigInc, IsomorphicInc->getType(), IVName); 2169 } 2170 IsomorphicInc->replaceAllUsesWith(NewInc); 2171 DeadInsts.emplace_back(IsomorphicInc); 2172 } 2173 } 2174 } 2175 SCEV_DEBUG_WITH_TYPE(DebugType, 2176 dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi 2177 << '\n'); 2178 SCEV_DEBUG_WITH_TYPE( 2179 DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n'); 2180 ++NumElim; 2181 Value *NewIV = OrigPhiRef; 2182 if (OrigPhiRef->getType() != Phi->getType()) { 2183 IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt()); 2184 Builder.SetCurrentDebugLocation(Phi->getDebugLoc()); 2185 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName); 2186 } 2187 Phi->replaceAllUsesWith(NewIV); 2188 DeadInsts.emplace_back(Phi); 2189 } 2190 return NumElim; 2191 } 2192 2193 Optional<ScalarEvolution::ValueOffsetPair> 2194 SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At, 2195 Loop *L) { 2196 using namespace llvm::PatternMatch; 2197 2198 SmallVector<BasicBlock *, 4> ExitingBlocks; 2199 L->getExitingBlocks(ExitingBlocks); 2200 2201 // Look for suitable value in simple conditions at the loop exits. 2202 for (BasicBlock *BB : ExitingBlocks) { 2203 ICmpInst::Predicate Pred; 2204 Instruction *LHS, *RHS; 2205 2206 if (!match(BB->getTerminator(), 2207 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)), 2208 m_BasicBlock(), m_BasicBlock()))) 2209 continue; 2210 2211 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At)) 2212 return ScalarEvolution::ValueOffsetPair(LHS, nullptr); 2213 2214 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At)) 2215 return ScalarEvolution::ValueOffsetPair(RHS, nullptr); 2216 } 2217 2218 // Use expand's logic which is used for reusing a previous Value in 2219 // ExprValueMap. 2220 ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At); 2221 if (VO.first) 2222 return VO; 2223 2224 // There is potential to make this significantly smarter, but this simple 2225 // heuristic already gets some interesting cases. 2226 2227 // Can not find suitable value. 2228 return None; 2229 } 2230 2231 template<typename T> static InstructionCost costAndCollectOperands( 2232 const SCEVOperand &WorkItem, const TargetTransformInfo &TTI, 2233 TargetTransformInfo::TargetCostKind CostKind, 2234 SmallVectorImpl<SCEVOperand> &Worklist) { 2235 2236 const T *S = cast<T>(WorkItem.S); 2237 InstructionCost Cost = 0; 2238 // Object to help map SCEV operands to expanded IR instructions. 
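  // Each entry records the opcode of one instruction kind the expansion will
  // emit, plus the range of operand indices it consumes; the loop at the end
  // of this function uses these entries to queue (opcode, operand index, SCEV
  // operand) work items for per-operand costing.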
2239   struct OperationIndices {
2240     OperationIndices(unsigned Opc, size_t min, size_t max) :
2241       Opcode(Opc), MinIdx(min), MaxIdx(max) { }
2242     unsigned Opcode;
2243     size_t MinIdx;
2244     size_t MaxIdx;
2245   };
2246
2247   // Collect the operations of all the instructions that will be needed to
2248   // expand the SCEVExpr. This is so that when we come to cost the operands,
2249   // we know what the generated user(s) will be.
2250   SmallVector<OperationIndices, 2> Operations;
2251
2252   auto CastCost = [&](unsigned Opcode) -> InstructionCost {
2253     Operations.emplace_back(Opcode, 0, 0);
2254     return TTI.getCastInstrCost(Opcode, S->getType(),
2255                                 S->getOperand(0)->getType(),
2256                                 TTI::CastContextHint::None, CostKind);
2257   };
2258
2259   auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
2260                        unsigned MinIdx = 0,
2261                        unsigned MaxIdx = 1) -> InstructionCost {
2262     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2263     return NumRequired *
2264            TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
2265   };
2266
2267   auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
2268                         unsigned MaxIdx) -> InstructionCost {
2269     Operations.emplace_back(Opcode, MinIdx, MaxIdx);
2270     Type *OpType = S->getOperand(0)->getType();
2271     return NumRequired * TTI.getCmpSelInstrCost(
2272                              Opcode, OpType, CmpInst::makeCmpResultType(OpType),
2273                              CmpInst::BAD_ICMP_PREDICATE, CostKind);
2274   };
2275
2276   switch (S->getSCEVType()) {
2277   case scCouldNotCompute:
2278     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2279   case scUnknown:
2280   case scConstant:
2281     return 0;
2282   case scPtrToInt:
2283     Cost = CastCost(Instruction::PtrToInt);
2284     break;
2285   case scTruncate:
2286     Cost = CastCost(Instruction::Trunc);
2287     break;
2288   case scZeroExtend:
2289     Cost = CastCost(Instruction::ZExt);
2290     break;
2291   case scSignExtend:
2292     Cost = CastCost(Instruction::SExt);
2293     break;
2294   case scUDivExpr: {
2295     unsigned Opcode = Instruction::UDiv;
2296     if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
2297       if (SC->getAPInt().isPowerOf2())
2298         Opcode = Instruction::LShr;
2299     Cost = ArithCost(Opcode, 1);
2300     break;
2301   }
2302   case scAddExpr:
2303     Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
2304     break;
2305   case scMulExpr:
2306     // TODO: this is a very pessimistic cost modelling for Mul,
2307     // because of the binary exponentiation (BinPow) algorithm actually used by
2308     // the expander; see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
2309     Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
2310     break;
2311   case scSMaxExpr:
2312   case scUMaxExpr:
2313   case scSMinExpr:
2314   case scUMinExpr: {
2315     // FIXME: should this ask for the cost of the intrinsics instead?
2316     Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2317     Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2318     break;
2319   }
2320   case scAddRecExpr: {
2321     // In this polynomial, we may have some zero operands, and we shouldn't
2322     // really charge for those. So how many non-zero coefficients are there?
2323     int NumTerms = llvm::count_if(S->operands(), [](const SCEV *Op) {
2324                      return !Op->isZero();
2325                    });
2326
2327     assert(NumTerms >= 1 && "Polynomial should have at least one term.");
2328     assert(!(*std::prev(S->operands().end()))->isZero() &&
2329            "Last operand should not be zero");
2330
2331     // Ignoring the constant term (operand 0), how many of the coefficients are u> 1?
2332     int NumNonZeroDegreeNonOneTerms =
2333         llvm::count_if(S->operands(), [](const SCEV *Op) {
2334           auto *SConst = dyn_cast<SCEVConstant>(Op);
2335           return !SConst || SConst->getAPInt().ugt(1);
2336         });
2337
2338     // Much like with a normal add expr, the polynomial will require
2339     // one less addition than the number of its terms.
2340     InstructionCost AddCost = ArithCost(Instruction::Add, NumTerms - 1,
2341                                         /*MinIdx*/ 1, /*MaxIdx*/ 1);
2342     // Here, *each* one of those will require a multiplication.
2343     InstructionCost MulCost =
2344         ArithCost(Instruction::Mul, NumNonZeroDegreeNonOneTerms);
2345     Cost = AddCost + MulCost;
2346
2347     // What is the degree of this polynomial?
2348     int PolyDegree = S->getNumOperands() - 1;
2349     assert(PolyDegree >= 1 && "Should be at least affine.");
2350
2351     // The final term will be:
2352     //   Op_{PolyDegree} * x ^ {PolyDegree}
2353     // Where x ^ {PolyDegree} will again require PolyDegree-1 mul operations.
2354     // Note that x ^ {PolyDegree} = x * x ^ {PolyDegree-1} so charging for
2355     // x ^ {PolyDegree} will give us x ^ {2} .. x ^ {PolyDegree-1} for free.
2356     // FIXME: this is conservatively correct, but might be overly pessimistic.
2357     Cost += MulCost * (PolyDegree - 1);
2358     break;
2359   }
2360   }
2361
2362   for (auto &CostOp : Operations) {
2363     for (auto SCEVOp : enumerate(S->operands())) {
2364       // Clamp the index to account for multiple IR operations being chained.
2365       size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2366       size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2367       Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2368     }
2369   }
2370   return Cost;
2371 }
2372
2373 bool SCEVExpander::isHighCostExpansionHelper(
2374     const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2375     InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2376     SmallPtrSetImpl<const SCEV *> &Processed,
2377     SmallVectorImpl<SCEVOperand> &Worklist) {
2378   if (Cost > Budget)
2379     return true; // Already run out of budget, give up.
2380
2381   const SCEV *S = WorkItem.S;
2382   // Was the cost of expansion of this expression already accounted for?
2383   if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2384     return false; // We have already accounted for this expression.
2385
2386   // If we can find an existing value for this scev available at the point "At",
2387   // then consider the expression cheap.
2388   if (getRelatedExistingExpansion(S, &At, L))
2389     return false; // Consider the expression to be free.
2390
2391   TargetTransformInfo::TargetCostKind CostKind =
2392       L->getHeader()->getParent()->hasMinSize()
2393           ? TargetTransformInfo::TCK_CodeSize
2394           : TargetTransformInfo::TCK_RecipThroughput;
2395
2396   switch (S->getSCEVType()) {
2397   case scCouldNotCompute:
2398     llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2399   case scUnknown:
2400     // Assumed to be zero cost.
2401     return false;
2402   case scConstant: {
2403     // Only evaluate the costs of constants when optimizing for size.
2404     if (CostKind != TargetTransformInfo::TCK_CodeSize)
2405       return false;
2406     const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2407     Type *Ty = S->getType();
2408     Cost += TTI.getIntImmCostInst(
2409         WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2410     return Cost > Budget;
2411   }
2412   case scTruncate:
2413   case scPtrToInt:
2414   case scZeroExtend:
2415   case scSignExtend: {
2416     Cost +=
2417         costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2418     return false; // Will answer upon next entry into this function.
2419   }
2420   case scUDivExpr: {
2421     // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2422     // HowManyLessThans produced to compute a precise expression, rather than a
2423     // UDiv from the user's code. If we can't find a UDiv in the code with some
2424     // simple searching, we need to account for its cost.
2425
2426     // At the beginning of this function we already tried to find an existing
2427     // value for plain 'S'. Now try to look up 'S + 1', since it is a common
2428     // pattern involving division. This is just a simple search heuristic.
2429     if (getRelatedExistingExpansion(
2430             SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2431       return false; // Consider it to be free.
2432
2433     Cost +=
2434         costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2435     return false; // Will answer upon next entry into this function.
2436   }
2437   case scAddExpr:
2438   case scMulExpr:
2439   case scUMaxExpr:
2440   case scSMaxExpr:
2441   case scUMinExpr:
2442   case scSMinExpr: {
2443     assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2444            "Nary expr should have more than 1 operand.");
2445     // The simple nary expr will require one less op (or pair of ops)
2446     // than the number of its terms.
2447     Cost +=
2448         costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2449     return Cost > Budget;
2450   }
2451   case scAddRecExpr: {
2452     assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2453            "Polynomial should be at least linear");
2454     Cost += costAndCollectOperands<SCEVAddRecExpr>(
2455         WorkItem, TTI, CostKind, Worklist);
2456     return Cost > Budget;
2457   }
2458   }
2459   llvm_unreachable("Unknown SCEV kind!");
2460 }
2461
2462 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2463                                             Instruction *IP) {
2464   assert(IP);
2465   switch (Pred->getKind()) {
2466   case SCEVPredicate::P_Union:
2467     return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2468   case SCEVPredicate::P_Equal:
2469     return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2470   case SCEVPredicate::P_Wrap: {
2471     auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2472     return expandWrapPredicate(AddRecPred, IP);
2473   }
2474   }
2475   llvm_unreachable("Unknown SCEV predicate type");
2476 }
2477
2478 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2479                                           Instruction *IP) {
2480   Value *Expr0 =
2481       expandCodeForImpl(Pred->getLHS(), Pred->getLHS()->getType(), IP, false);
2482   Value *Expr1 =
2483       expandCodeForImpl(Pred->getRHS(), Pred->getRHS()->getType(), IP, false);
2484
2485   Builder.SetInsertPoint(IP);
2486   auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2487   return I;
2488 }
2489
2490 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2491                                            Instruction *Loc, bool Signed) {
2492   assert(AR->isAffine() && "Cannot generate RT check for "
2493                            "non-affine expression");
2494
2495   SCEVUnionPredicate Pred;
2496   const SCEV *ExitCount =
2497       SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2498
2499   assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2500
2501   const SCEV *Step = AR->getStepRecurrence(SE);
2502   const SCEV *Start = AR->getStart();
2503
2504   Type *ARTy = AR->getType();
2505   unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2506   unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2507
2508   // The expression {Start,+,Step} has nusw/nssw if
2509   //   Step < 0, Start - |Step| * Backedge <= Start
2510   //   Step >= 0, Start + |Step| * Backedge > Start
2511   // and |Step| * Backedge doesn't unsigned overflow.
2512
2513   IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2514   Builder.SetInsertPoint(Loc);
2515   Value *TripCountVal = expandCodeForImpl(ExitCount, CountTy, Loc, false);
2516
2517   IntegerType *Ty =
2518       IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2519   Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;
2520
2521   Value *StepValue = expandCodeForImpl(Step, Ty, Loc, false);
2522   Value *NegStepValue =
2523       expandCodeForImpl(SE.getNegativeSCEV(Step), Ty, Loc, false);
2524   Value *StartValue = expandCodeForImpl(
2525       isa<PointerType>(ARExpandTy) ? Start
2526                                    : SE.getPtrToIntExpr(Start, ARExpandTy),
2527       ARExpandTy, Loc, false);
2528
2529   ConstantInt *Zero =
2530       ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2531
2532   Builder.SetInsertPoint(Loc);
2533   // Compute |Step|.
2534   Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2535   Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2536
2537   // Get the backedge taken count and truncate or extend it to the AR type.
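  // The emitted check has roughly the following shape (illustrative only;
  // comparisons are signed or unsigned according to the Signed flag):
  //   %mul    = umul.with.overflow(|Step|, BackedgeTakenCount)
  //   %add    = Start + %mul.result       ; a GEP is used for pointer ARs
  //   %sub    = Start - %mul.result
  //   %wrap   = select (Step < 0), (%sub > Start), (%add < Start)
  //   %result = or %wrap, %mul.overflow   ; plus a trip-count truncation check
  //                                       ; when the count is wider than the AR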
2538 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty); 2539 auto *MulF = Intrinsic::getDeclaration(Loc->getModule(), 2540 Intrinsic::umul_with_overflow, Ty); 2541 2542 // Compute |Step| * Backedge 2543 CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul"); 2544 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result"); 2545 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow"); 2546 2547 // Compute: 2548 // Start + |Step| * Backedge < Start 2549 // Start - |Step| * Backedge > Start 2550 Value *Add = nullptr, *Sub = nullptr; 2551 if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) { 2552 const SCEV *MulS = SE.getSCEV(MulV); 2553 const SCEV *NegMulS = SE.getNegativeSCEV(MulS); 2554 Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue), 2555 ARPtrTy); 2556 Sub = Builder.CreateBitCast( 2557 expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy); 2558 } else { 2559 Add = Builder.CreateAdd(StartValue, MulV); 2560 Sub = Builder.CreateSub(StartValue, MulV); 2561 } 2562 2563 Value *EndCompareGT = Builder.CreateICmp( 2564 Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue); 2565 2566 Value *EndCompareLT = Builder.CreateICmp( 2567 Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue); 2568 2569 // Select the answer based on the sign of Step. 2570 Value *EndCheck = 2571 Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT); 2572 2573 // If the backedge taken count type is larger than the AR type, 2574 // check that we don't drop any bits by truncating it. If we are 2575 // dropping bits, then we have overflow (unless the step is zero). 2576 if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) { 2577 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits); 2578 auto *BackedgeCheck = 2579 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal, 2580 ConstantInt::get(Loc->getContext(), MaxVal)); 2581 BackedgeCheck = Builder.CreateAnd( 2582 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero)); 2583 2584 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck); 2585 } 2586 2587 return Builder.CreateOr(EndCheck, OfMul); 2588 } 2589 2590 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred, 2591 Instruction *IP) { 2592 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr()); 2593 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr; 2594 2595 // Add a check for NUSW 2596 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW) 2597 NUSWCheck = generateOverflowCheck(A, IP, false); 2598 2599 // Add a check for NSSW 2600 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW) 2601 NSSWCheck = generateOverflowCheck(A, IP, true); 2602 2603 if (NUSWCheck && NSSWCheck) 2604 return Builder.CreateOr(NUSWCheck, NSSWCheck); 2605 2606 if (NUSWCheck) 2607 return NUSWCheck; 2608 2609 if (NSSWCheck) 2610 return NSSWCheck; 2611 2612 return ConstantInt::getFalse(IP->getContext()); 2613 } 2614 2615 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union, 2616 Instruction *IP) { 2617 auto *BoolType = IntegerType::get(IP->getContext(), 1); 2618 Value *Check = ConstantInt::getNullValue(BoolType); 2619 2620 // Loop over all checks in this set. 
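  // The union check fails if any member predicate's check fails, so the
  // individual checks are simply OR'ed together.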
2621 for (auto Pred : Union->getPredicates()) { 2622 auto *NextCheck = expandCodeForPredicate(Pred, IP); 2623 Builder.SetInsertPoint(IP); 2624 Check = Builder.CreateOr(Check, NextCheck); 2625 } 2626 2627 return Check; 2628 } 2629 2630 Value *SCEVExpander::fixupLCSSAFormFor(Instruction *User, unsigned OpIdx) { 2631 assert(PreserveLCSSA); 2632 SmallVector<Instruction *, 1> ToUpdate; 2633 2634 auto *OpV = User->getOperand(OpIdx); 2635 auto *OpI = dyn_cast<Instruction>(OpV); 2636 if (!OpI) 2637 return OpV; 2638 2639 Loop *DefLoop = SE.LI.getLoopFor(OpI->getParent()); 2640 Loop *UseLoop = SE.LI.getLoopFor(User->getParent()); 2641 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop)) 2642 return OpV; 2643 2644 ToUpdate.push_back(OpI); 2645 SmallVector<PHINode *, 16> PHIsToRemove; 2646 formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, Builder, &PHIsToRemove); 2647 for (PHINode *PN : PHIsToRemove) { 2648 if (!PN->use_empty()) 2649 continue; 2650 InsertedValues.erase(PN); 2651 InsertedPostIncValues.erase(PN); 2652 PN->eraseFromParent(); 2653 } 2654 2655 return User->getOperand(OpIdx); 2656 } 2657 2658 namespace { 2659 // Search for a SCEV subexpression that is not safe to expand. Any expression 2660 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely 2661 // UDiv expressions. We don't know if the UDiv is derived from an IR divide 2662 // instruction, but the important thing is that we prove the denominator is 2663 // nonzero before expansion. 2664 // 2665 // IVUsers already checks that IV-derived expressions are safe. So this check is 2666 // only needed when the expression includes some subexpression that is not IV 2667 // derived. 2668 // 2669 // Currently, we only allow division by a nonzero constant here. If this is 2670 // inadequate, we could easily allow division by SCEVUnknown by using 2671 // ValueTracking to check isKnownNonZero(). 2672 // 2673 // We cannot generally expand recurrences unless the step dominates the loop 2674 // header. The expander handles the special case of affine recurrences by 2675 // scaling the recurrence outside the loop, but this technique isn't generally 2676 // applicable. Expanding a nested recurrence outside a loop requires computing 2677 // binomial coefficients. This could be done, but the recurrence has to be in a 2678 // perfectly reduced form, which can't be guaranteed. 
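// For example, a SCEV containing (%a /u %b) with a non-constant %b is flagged
// as unsafe, while (%a /u 8) is not; likewise a non-affine recurrence whose
// step does not dominate its loop's header is flagged as unsafe.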
2679 struct SCEVFindUnsafe { 2680 ScalarEvolution &SE; 2681 bool IsUnsafe; 2682 2683 SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {} 2684 2685 bool follow(const SCEV *S) { 2686 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 2687 const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS()); 2688 if (!SC || SC->getValue()->isZero()) { 2689 IsUnsafe = true; 2690 return false; 2691 } 2692 } 2693 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2694 const SCEV *Step = AR->getStepRecurrence(SE); 2695 if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) { 2696 IsUnsafe = true; 2697 return false; 2698 } 2699 } 2700 return true; 2701 } 2702 bool isDone() const { return IsUnsafe; } 2703 }; 2704 } 2705 2706 namespace llvm { 2707 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) { 2708 SCEVFindUnsafe Search(SE); 2709 visitAll(S, Search); 2710 return !Search.IsUnsafe; 2711 } 2712 2713 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint, 2714 ScalarEvolution &SE) { 2715 if (!isSafeToExpand(S, SE)) 2716 return false; 2717 // We have to prove that the expanded site of S dominates InsertionPoint. 2718 // This is easy when not in the same block, but hard when S is an instruction 2719 // to be expanded somewhere inside the same block as our insertion point. 2720 // What we really need here is something analogous to an OrderedBasicBlock, 2721 // but for the moment, we paper over the problem by handling two common and 2722 // cheap to check cases. 2723 if (SE.properlyDominates(S, InsertionPoint->getParent())) 2724 return true; 2725 if (SE.dominates(S, InsertionPoint->getParent())) { 2726 if (InsertionPoint->getParent()->getTerminator() == InsertionPoint) 2727 return true; 2728 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) 2729 if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue())) 2730 return true; 2731 } 2732 return false; 2733 } 2734 2735 void SCEVExpanderCleaner::cleanup() { 2736 // Result is used, nothing to remove. 2737 if (ResultUsed) 2738 return; 2739 2740 auto InsertedInstructions = Expander.getAllInsertedInstructions(); 2741 #ifndef NDEBUG 2742 SmallPtrSet<Instruction *, 8> InsertedSet(InsertedInstructions.begin(), 2743 InsertedInstructions.end()); 2744 (void)InsertedSet; 2745 #endif 2746 // Remove sets with value handles. 2747 Expander.clear(); 2748 2749 // Sort so that earlier instructions do not dominate later instructions. 2750 stable_sort(InsertedInstructions, [this](Instruction *A, Instruction *B) { 2751 return DT.dominates(B, A); 2752 }); 2753 // Remove all inserted instructions. 2754 for (Instruction *I : InsertedInstructions) { 2755 2756 #ifndef NDEBUG 2757 assert(all_of(I->users(), 2758 [&InsertedSet](Value *U) { 2759 return InsertedSet.contains(cast<Instruction>(U)); 2760 }) && 2761 "removed instruction should only be used by instructions inserted " 2762 "during expansion"); 2763 #endif 2764 assert(!I->getType()->isVoidTy() && 2765 "inserted instruction should have non-void types"); 2766 I->replaceAllUsesWith(UndefValue::get(I->getType())); 2767 I->eraseFromParent(); 2768 } 2769 } 2770 } 2771