//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one. (A small illustrative sketch
// appears near the end of this header comment.)
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
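//
// As a rough illustrative sketch (not code from this pass, and eliding the
// runtime checks the vectorizer actually emits), widening the loop
//
//   for (int i = 0; i < n; ++i)
//     a[i] = b[i] + c[i];
//
// with VF = 4 conceptually produces a loop whose induction variable advances
// by the vector width, plus a scalar epilogue loop for the remainder:
//
//   int i = 0;
//   for (; i + 4 <= n; i += 4)
//     a[i..i+3] = b[i..i+3] + c[i..i+3];  // one wide load/add/store
//   for (; i < n; ++i)                    // scalar remainder loop
//     a[i] = b[i] + c[i];
//
// (The a[i..i+3] slices are pseudocode standing in for the wide vector
// operations.)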
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
"llvm/Transforms/Utils/SizeOpts.h" 144 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 145 #include <algorithm> 146 #include <cassert> 147 #include <cstdint> 148 #include <cstdlib> 149 #include <functional> 150 #include <iterator> 151 #include <limits> 152 #include <memory> 153 #include <string> 154 #include <tuple> 155 #include <utility> 156 157 using namespace llvm; 158 159 #define LV_NAME "loop-vectorize" 160 #define DEBUG_TYPE LV_NAME 161 162 #ifndef NDEBUG 163 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 164 #endif 165 166 /// @{ 167 /// Metadata attribute names 168 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 169 const char LLVMLoopVectorizeFollowupVectorized[] = 170 "llvm.loop.vectorize.followup_vectorized"; 171 const char LLVMLoopVectorizeFollowupEpilogue[] = 172 "llvm.loop.vectorize.followup_epilogue"; 173 /// @} 174 175 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 176 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 177 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 178 179 static cl::opt<bool> EnableEpilogueVectorization( 180 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 181 cl::desc("Enable vectorization of epilogue loops.")); 182 183 static cl::opt<unsigned> EpilogueVectorizationForceVF( 184 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 185 cl::desc("When epilogue vectorization is enabled, and a value greater than " 186 "1 is specified, forces the given VF for all applicable epilogue " 187 "loops.")); 188 189 static cl::opt<unsigned> EpilogueVectorizationMinVF( 190 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 191 cl::desc("Only loops with vectorization factor equal to or larger than " 192 "the specified value are considered for epilogue vectorization.")); 193 194 /// Loops with a known constant trip count below this number are vectorized only 195 /// if no scalar iteration overheads are incurred. 196 static cl::opt<unsigned> TinyTripCountVectorThreshold( 197 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 198 cl::desc("Loops with a constant trip count that is smaller than this " 199 "value are vectorized only if no scalar iteration overheads " 200 "are incurred.")); 201 202 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 203 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 204 cl::desc("The maximum allowed number of runtime memory checks with a " 205 "vectorize(enable) pragma.")); 206 207 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 208 // that predication is preferred, and this lists all options. I.e., the 209 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 210 // and predicate the instructions accordingly. 
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization if "
                          "tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
Mostly " 280 "useful for getting consistent testing.")); 281 282 static cl::opt<bool> ForceTargetSupportsScalableVectors( 283 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 284 cl::desc( 285 "Pretend that scalable vectors are supported, even if the target does " 286 "not support them. This flag should only be used for testing.")); 287 288 static cl::opt<unsigned> SmallLoopCost( 289 "small-loop-cost", cl::init(20), cl::Hidden, 290 cl::desc( 291 "The cost of a loop that is considered 'small' by the interleaver.")); 292 293 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 294 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 295 cl::desc("Enable the use of the block frequency analysis to access PGO " 296 "heuristics minimizing code growth in cold regions and being more " 297 "aggressive in hot regions.")); 298 299 // Runtime interleave loops for load/store throughput. 300 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 301 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 302 cl::desc( 303 "Enable runtime interleaving until load/store ports are saturated")); 304 305 /// Interleave small loops with scalar reductions. 306 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 307 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 308 cl::desc("Enable interleaving for loops with small iteration counts that " 309 "contain scalar reductions to expose ILP.")); 310 311 /// The number of stores in a loop that are allowed to need predication. 312 static cl::opt<unsigned> NumberOfStoresToPredicate( 313 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 314 cl::desc("Max number of stores to be predicated behind an if.")); 315 316 static cl::opt<bool> EnableIndVarRegisterHeur( 317 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 318 cl::desc("Count the induction variable only once when interleaving")); 319 320 static cl::opt<bool> EnableCondStoresVectorization( 321 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 322 cl::desc("Enable if predication of stores during vectorization.")); 323 324 static cl::opt<unsigned> MaxNestedScalarReductionIC( 325 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 326 cl::desc("The maximum interleave count to use when interleaving a scalar " 327 "reduction in a nested loop.")); 328 329 static cl::opt<bool> 330 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 331 cl::Hidden, 332 cl::desc("Prefer in-loop vector reductions, " 333 "overriding the targets preference.")); 334 335 cl::opt<bool> EnableStrictReductions( 336 "enable-strict-reductions", cl::init(false), cl::Hidden, 337 cl::desc("Enable the vectorisation of loops with in-order (strict) " 338 "FP reductions")); 339 340 static cl::opt<bool> PreferPredicatedReductionSelect( 341 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 342 cl::desc( 343 "Prefer predicating a reduction operation over an after loop select.")); 344 345 cl::opt<bool> EnableVPlanNativePath( 346 "enable-vplan-native-path", cl::init(false), cl::Hidden, 347 cl::desc("Enable VPlan-native vectorization path with " 348 "support for outer loop vectorization.")); 349 350 // FIXME: Remove this switch once we have divergence analysis. Currently we 351 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);

  /// Fix the vectorized code, taking care of header phis, live-outs, and more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a sequence of scalar instances for each lane between \p
  /// MinLane and \p MaxLane, times each part between \p MinPart and \p
  /// MaxPart, inclusive. Uses the VPValue operands from \p Operands instead of
  /// \p Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask);

  /// Set the debug location in the builder \p Ptr using the debug location in
  /// \p V. If \p Ptr is None then it uses the class member's Builder.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Fix a first-order recurrence. This is the second phase of vectorizing
  /// this phi node.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);

  /// Fix a reduction cross-iteration phi. This is the second phase of
  /// vectorizing this phi node.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIndex.
  /// \p Opcode is relevant for FP induction variable.
  virtual Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                               Instruction::BinaryOps Opcode =
                                   Instruction::BinaryOpsEnd);

  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step, and
  /// \p EntryVal is the value from the original loop that maps to the steps.
  /// Note that \p EntryVal doesn't have to be an induction variable - it
  /// can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We had already proved that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - original one (if \p EntryVal is a phi corresponding to the
  /// original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and createVectorIntOrFpInductionPHI()).
  /// In the latter case \p EntryVal is a TruncInst and we must not record
  /// anything for that IV, but it's error-prone to expect callers of this
  /// routine to care about that, hence this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct.
  /// Returns the block containing the checks or nullptr if no checks have
  /// been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
                              const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones (\see
  /// addNewMetadata). Use this for *newly created* instructions in the vector
  /// loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(Value *Val, int StartIdx, Value *Step,
                       Instruction::BinaryOps Opcode =
                           Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(unsigned MVF, unsigned MUF, unsigned EVF,
                                unsigned EUF)
      : MainLoopVF(ElementCount::getFixed(MVF)), MainLoopUF(MUF),
        EpilogueVF(ElementCount::getFixed(EVF)), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to setup the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When a FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs()
                   << "Failed to create new discriminator: "
                   << DIL->getFilename() << " Line: " << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, revert back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Constant *Step, ElementCount VF) {
  assert(isa<ConstantInt>(Step) && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(
      Step->getType(),
      cast<ConstantInt>(Step)->getSExtValue() * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To,
                                      Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorisation with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen because of
/// a number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned.
VF and LoopCost 1279 /// are the selected vectorization factor and the cost of the selected VF. 1280 unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost); 1281 1282 /// Memory access instruction may be vectorized in more than one way. 1283 /// Form of instruction after vectorization depends on cost. 1284 /// This function takes cost-based decisions for Load/Store instructions 1285 /// and collects them in a map. This decisions map is used for building 1286 /// the lists of loop-uniform and loop-scalar instructions. 1287 /// The calculated cost is saved with widening decision in order to 1288 /// avoid redundant calculations. 1289 void setCostBasedWideningDecision(ElementCount VF); 1290 1291 /// A struct that represents some properties of the register usage 1292 /// of a loop. 1293 struct RegisterUsage { 1294 /// Holds the number of loop invariant values that are used in the loop. 1295 /// The key is ClassID of target-provided register class. 1296 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1297 /// Holds the maximum number of concurrent live intervals in the loop. 1298 /// The key is ClassID of target-provided register class. 1299 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1300 }; 1301 1302 /// \return Returns information about the register usages of the loop for the 1303 /// given vectorization factors. 1304 SmallVector<RegisterUsage, 8> 1305 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1306 1307 /// Collect values we want to ignore in the cost model. 1308 void collectValuesToIgnore(); 1309 1310 /// Collect all element types in the loop for which widening is needed. 1311 void collectElementTypesForWidening(); 1312 1313 /// Split reductions into those that happen in the loop, and those that happen 1314 /// outside. In loop reductions are collected into InLoopReductionChains. 1315 void collectInLoopReductions(); 1316 1317 /// Returns true if we should use strict in-order reductions for the given 1318 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1319 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1320 /// of FP operations. 1321 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) { 1322 return EnableStrictReductions && !Hints->allowReordering() && 1323 RdxDesc.isOrdered(); 1324 } 1325 1326 /// \returns The smallest bitwidth each instruction can be represented with. 1327 /// The vector equivalents of these instructions should be truncated to this 1328 /// type. 1329 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1330 return MinBWs; 1331 } 1332 1333 /// \returns True if it is more profitable to scalarize instruction \p I for 1334 /// vectorization factor \p VF. 1335 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1336 assert(VF.isVector() && 1337 "Profitable to scalarize relevant only for VF > 1."); 1338 1339 // Cost model is not run in the VPlan-native path - return conservative 1340 // result until this changes. 1341 if (EnableVPlanNativePath) 1342 return false; 1343 1344 auto Scalars = InstsToScalarize.find(VF); 1345 assert(Scalars != InstsToScalarize.end() && 1346 "VF not yet analyzed for scalarization profitability"); 1347 return Scalars->second.find(I) != Scalars->second.end(); 1348 } 1349 1350 /// Returns true if \p I is known to be uniform after vectorization. 
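/// A uniform value is represented by a single scalar per vector iteration; typical examples are the pointer operands of consecutive or interleaved memory accesses.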
1351 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1352 if (VF.isScalar()) 1353 return true; 1354 1355 // Cost model is not run in the VPlan-native path - return conservative 1356 // result until this changes. 1357 if (EnableVPlanNativePath) 1358 return false; 1359 1360 auto UniformsPerVF = Uniforms.find(VF); 1361 assert(UniformsPerVF != Uniforms.end() && 1362 "VF not yet analyzed for uniformity"); 1363 return UniformsPerVF->second.count(I); 1364 } 1365 1366 /// Returns true if \p I is known to be scalar after vectorization. 1367 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1368 if (VF.isScalar()) 1369 return true; 1370 1371 // Cost model is not run in the VPlan-native path - return conservative 1372 // result until this changes. 1373 if (EnableVPlanNativePath) 1374 return false; 1375 1376 auto ScalarsPerVF = Scalars.find(VF); 1377 assert(ScalarsPerVF != Scalars.end() && 1378 "Scalar values are not calculated for VF"); 1379 return ScalarsPerVF->second.count(I); 1380 } 1381 1382 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1383 /// for vectorization factor \p VF. 1384 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1385 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1386 !isProfitableToScalarize(I, VF) && 1387 !isScalarAfterVectorization(I, VF); 1388 } 1389 1390 /// Decision that was taken during cost calculation for a memory instruction. 1391 enum InstWidening { 1392 CM_Unknown, 1393 CM_Widen, // For consecutive accesses with stride +1. 1394 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1395 CM_Interleave, 1396 CM_GatherScatter, 1397 CM_Scalarize 1398 }; 1399 1400 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1401 /// instruction \p I and vector width \p VF. 1402 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1403 InstructionCost Cost) { 1404 assert(VF.isVector() && "Expected VF >=2"); 1405 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1406 } 1407 1408 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1409 /// interleaving group \p Grp and vector width \p VF. 1410 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1411 ElementCount VF, InstWidening W, 1412 InstructionCost Cost) { 1413 assert(VF.isVector() && "Expected VF >=2"); 1414 // Broadcast this decision to all instructions inside the group. 1415 // But the cost will be assigned to one instruction only. 1416 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1417 if (auto *I = Grp->getMember(i)) { 1418 if (Grp->getInsertPos() == I) 1419 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1420 else 1421 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1422 } 1423 } 1424 } 1425 1426 /// Return the cost model decision for the given instruction \p I and vector 1427 /// width \p VF. Return CM_Unknown if this instruction did not pass 1428 /// through the cost modeling. 1429 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1430 assert(VF.isVector() && "Expected VF to be a vector VF"); 1431 // Cost model is not run in the VPlan-native path - return conservative 1432 // result until this changes.
1433 if (EnableVPlanNativePath) 1434 return CM_GatherScatter; 1435 1436 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1437 auto Itr = WideningDecisions.find(InstOnVF); 1438 if (Itr == WideningDecisions.end()) 1439 return CM_Unknown; 1440 return Itr->second.first; 1441 } 1442 1443 /// Return the vectorization cost for the given instruction \p I and vector 1444 /// width \p VF. 1445 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1446 assert(VF.isVector() && "Expected VF >=2"); 1447 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1448 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1449 "The cost is not calculated"); 1450 return WideningDecisions[InstOnVF].second; 1451 } 1452 1453 /// Return True if instruction \p I is an optimizable truncate whose operand 1454 /// is an induction variable. Such a truncate will be removed by adding a new 1455 /// induction variable with the destination type. 1456 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1457 // If the instruction is not a truncate, return false. 1458 auto *Trunc = dyn_cast<TruncInst>(I); 1459 if (!Trunc) 1460 return false; 1461 1462 // Get the source and destination types of the truncate. 1463 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1464 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1465 1466 // If the truncate is free for the given types, return false. Replacing a 1467 // free truncate with an induction variable would add an induction variable 1468 // update instruction to each iteration of the loop. We exclude from this 1469 // check the primary induction variable since it will need an update 1470 // instruction regardless. 1471 Value *Op = Trunc->getOperand(0); 1472 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1473 return false; 1474 1475 // If the truncated value is not an induction variable, return false. 1476 return Legal->isInductionPhi(Op); 1477 } 1478 1479 /// Collects the instructions to scalarize for each predicated instruction in 1480 /// the loop. 1481 void collectInstsToScalarize(ElementCount VF); 1482 1483 /// Collect Uniform and Scalar values for the given \p VF. 1484 /// The sets depend on CM decision for Load/Store instructions 1485 /// that may be vectorized as interleave, gather-scatter or scalarized. 1486 void collectUniformsAndScalars(ElementCount VF) { 1487 // Do the analysis once. 1488 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1489 return; 1490 setCostBasedWideningDecision(VF); 1491 collectLoopUniforms(VF); 1492 collectLoopScalars(VF); 1493 } 1494 1495 /// Returns true if the target machine supports masked store operation 1496 /// for the given \p DataType and kind of access to \p Ptr. 1497 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1498 return Legal->isConsecutivePtr(Ptr) && 1499 TTI.isLegalMaskedStore(DataType, Alignment); 1500 } 1501 1502 /// Returns true if the target machine supports masked load operation 1503 /// for the given \p DataType and kind of access to \p Ptr. 1504 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1505 return Legal->isConsecutivePtr(Ptr) && 1506 TTI.isLegalMaskedLoad(DataType, Alignment); 1507 } 1508 1509 /// Returns true if the target machine can represent \p V as a masked gather 1510 /// or scatter operation. 
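/// Only load and store instructions are considered; for any other value this returns false.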
1511 bool isLegalGatherOrScatter(Value *V) { 1512 bool LI = isa<LoadInst>(V); 1513 bool SI = isa<StoreInst>(V); 1514 if (!LI && !SI) 1515 return false; 1516 auto *Ty = getLoadStoreType(V); 1517 Align Align = getLoadStoreAlignment(V); 1518 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1519 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1520 } 1521 1522 /// Returns true if the target machine supports all of the reduction 1523 /// variables found for the given VF. 1524 bool canVectorizeReductions(ElementCount VF) const { 1525 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1526 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1527 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1528 })); 1529 } 1530 1531 /// Returns true if \p I is an instruction that will be scalarized with 1532 /// predication. Such instructions include conditional stores and 1533 /// instructions that may divide by zero. 1534 /// If a non-zero VF has been calculated, we check if I will be scalarized 1535 /// with predication for that VF. 1536 bool isScalarWithPredication(Instruction *I) const; 1537 1538 // Returns true if \p I is an instruction that will be predicated either 1539 // through scalar predication or masked load/store or masked gather/scatter. 1540 // Superset of instructions that return true for isScalarWithPredication. 1541 bool isPredicatedInst(Instruction *I) { 1542 if (!blockNeedsPredication(I->getParent())) 1543 return false; 1544 // Loads and stores that need some form of masked operation are predicated 1545 // instructions. 1546 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1547 return Legal->isMaskRequired(I); 1548 return isScalarWithPredication(I); 1549 } 1550 1551 /// Returns true if \p I is a memory instruction with consecutive memory 1552 /// access that can be widened. 1553 bool 1554 memoryInstructionCanBeWidened(Instruction *I, 1555 ElementCount VF = ElementCount::getFixed(1)); 1556 1557 /// Returns true if \p I is a memory instruction in an interleaved-group 1558 /// of memory accesses that can be vectorized with wide vector loads/stores 1559 /// and shuffles. 1560 bool 1561 interleavedAccessCanBeWidened(Instruction *I, 1562 ElementCount VF = ElementCount::getFixed(1)); 1563 1564 /// Check if \p Instr belongs to any interleaved access group. 1565 bool isAccessInterleaved(Instruction *Instr) { 1566 return InterleaveInfo.isInterleaved(Instr); 1567 } 1568 1569 /// Get the interleaved access group that \p Instr belongs to. 1570 const InterleaveGroup<Instruction> * 1571 getInterleavedAccessGroup(Instruction *Instr) { 1572 return InterleaveInfo.getInterleaveGroup(Instr); 1573 } 1574 1575 /// Returns true if we're required to use a scalar epilogue for at least 1576 /// the final iteration of the original loop. 1577 bool requiresScalarEpilogue(ElementCount VF) const { 1578 if (!isScalarEpilogueAllowed()) 1579 return false; 1580 // If we might exit from anywhere but the latch, must run the exiting 1581 // iteration in scalar form. 1582 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) 1583 return true; 1584 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue(); 1585 } 1586 1587 /// Returns true if a scalar epilogue is allowed, i.e. it is not prohibited 1588 /// by optsize or a loop hint annotation. 1589 bool isScalarEpilogueAllowed() const { 1590 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed; 1591 } 1592 1593 /// Returns true if all loop blocks should be masked to fold the tail loop.
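/// When the tail is folded, the remainder iterations execute in the masked vector loop instead of in a scalar epilogue.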
1594 bool foldTailByMasking() const { return FoldTailByMasking; } 1595 1596 bool blockNeedsPredication(BasicBlock *BB) const { 1597 return foldTailByMasking() || Legal->blockNeedsPredication(BB); 1598 } 1599 1600 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi 1601 /// nodes to the chain of instructions representing the reductions. Uses a 1602 /// MapVector to ensure deterministic iteration order. 1603 using ReductionChainMap = 1604 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>; 1605 1606 /// Return the chain of instructions representing an inloop reduction. 1607 const ReductionChainMap &getInLoopReductionChains() const { 1608 return InLoopReductionChains; 1609 } 1610 1611 /// Returns true if the Phi is part of an inloop reduction. 1612 bool isInLoopReduction(PHINode *Phi) const { 1613 return InLoopReductionChains.count(Phi); 1614 } 1615 1616 /// Estimate cost of an intrinsic call instruction CI if it were vectorized 1617 /// with factor VF. Return the cost of the instruction, including 1618 /// scalarization overhead if it's needed. 1619 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const; 1620 1621 /// Estimate cost of a call instruction CI if it were vectorized with factor 1622 /// VF. Return the cost of the instruction, including scalarization overhead 1623 /// if it's needed. The flag NeedToScalarize shows if the call needs to be 1624 /// scalarized - 1625 /// i.e. either a vector version isn't available, or it is too expensive. 1626 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF, 1627 bool &NeedToScalarize) const; 1628 1629 /// Returns true if the per-lane cost of VectorizationFactor A is lower than 1630 /// that of B. 1631 bool isMoreProfitable(const VectorizationFactor &A, 1632 const VectorizationFactor &B) const; 1633 1634 /// Invalidates decisions already taken by the cost model. 1635 void invalidateCostModelingDecisions() { 1636 WideningDecisions.clear(); 1637 Uniforms.clear(); 1638 Scalars.clear(); 1639 } 1640 1641 private: 1642 unsigned NumPredStores = 0; 1643 1644 /// \return An upper bound for the vectorization factors for both 1645 /// fixed and scalable vectorization, where the minimum-known number of 1646 /// elements is a power-of-2 larger than zero. If scalable vectorization is 1647 /// disabled or unsupported, then the scalable part will be equal to 1648 /// ElementCount::getScalable(0). 1649 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount, 1650 ElementCount UserVF); 1651 1652 /// \return the maximized element count based on the target's vector 1653 /// registers and the loop trip-count, but limited to a maximum safe VF. 1654 /// This is a helper function of computeFeasibleMaxVF. 1655 /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure 1656 /// issue that occurred on one of the buildbots which cannot be reproduced 1657 /// without having access to the proprietary compiler (see comments on 1658 /// D98509). The issue is currently under investigation and this workaround 1659 /// will be removed as soon as possible. 1660 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount, 1661 unsigned SmallestType, 1662 unsigned WidestType, 1663 const ElementCount &MaxSafeVF); 1664 1665 /// \return the maximum legal scalable VF, based on the safe max number 1666 /// of elements.
1667 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements); 1668 1669 /// The vectorization cost is a combination of the cost itself and a boolean 1670 /// indicating whether any of the contributing operations will actually 1671 /// operate on vector values after type legalization in the backend. If this 1672 /// latter value is false, then all operations will be scalarized (i.e. no 1673 /// vectorization has actually taken place). 1674 using VectorizationCostTy = std::pair<InstructionCost, bool>; 1675 1676 /// Returns the expected execution cost. The unit of the cost does 1677 /// not matter because we use the 'cost' units to compare different 1678 /// vector widths. The cost that is returned is *not* normalized by 1679 /// the factor width. If \p Invalid is not nullptr, this function 1680 /// will add a pair(Instruction*, ElementCount) to \p Invalid for 1681 /// each instruction that has an Invalid cost for the given VF. 1682 using InstructionVFPair = std::pair<Instruction *, ElementCount>; 1683 VectorizationCostTy 1684 expectedCost(ElementCount VF, 1685 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr); 1686 1687 /// Returns the execution time cost of an instruction for a given vector 1688 /// width. Vector width of one means scalar. 1689 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF); 1690 1691 /// The cost-computation logic from getInstructionCost which provides 1692 /// the vector type as an output parameter. 1693 InstructionCost getInstructionCost(Instruction *I, ElementCount VF, 1694 Type *&VectorTy); 1695 1696 /// Return the cost of instructions in an inloop reduction pattern, if I is 1697 /// part of that pattern. 1698 Optional<InstructionCost> 1699 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy, 1700 TTI::TargetCostKind CostKind); 1701 1702 /// Calculate vectorization cost of memory instruction \p I. 1703 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF); 1704 1705 /// The cost computation for scalarized memory instruction. 1706 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF); 1707 1708 /// The cost computation for interleaving group of memory instructions. 1709 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF); 1710 1711 /// The cost computation for Gather/Scatter instruction. 1712 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF); 1713 1714 /// The cost computation for widening instruction \p I with consecutive 1715 /// memory access. 1716 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF); 1717 1718 /// The cost calculation for Load/Store instruction \p I with uniform pointer - 1719 /// Load: scalar load + broadcast. 1720 /// Store: scalar store + (loop invariant value stored? 0 : extract of last 1721 /// element) 1722 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF); 1723 1724 /// Estimate the overhead of scalarizing an instruction. This is a 1725 /// convenience wrapper for the type-based getScalarizationOverhead API. 1726 InstructionCost getScalarizationOverhead(Instruction *I, 1727 ElementCount VF) const; 1728 1729 /// Returns whether the instruction is a load or store and will be emitted 1730 /// as a vector operation. 1731 bool isConsecutiveLoadOrStore(Instruction *I); 1732 1733 /// Returns true if an artificially high cost for emulated masked memrefs 1734 /// should be used.
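/// Emulated (scalarized and predicated) masked accesses are expanded into per-lane control flow, so a plain per-instruction cost tends to underestimate them.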
1735 bool useEmulatedMaskMemRefHack(Instruction *I); 1736 1737 /// Map of scalar integer values to the smallest bitwidth they can be legally 1738 /// represented as. The vector equivalents of these values should be truncated 1739 /// to this type. 1740 MapVector<Instruction *, uint64_t> MinBWs; 1741 1742 /// A type representing the costs for instructions if they were to be 1743 /// scalarized rather than vectorized. The entries are Instruction-Cost 1744 /// pairs. 1745 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>; 1746 1747 /// A set containing all BasicBlocks that are known to be present after 1748 /// vectorization as predicated blocks. 1749 SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization; 1750 1751 /// Records whether it is allowed to have the original scalar loop execute at 1752 /// least once. This may be needed as a fallback loop in case runtime 1753 /// aliasing/dependence checks fail, or to handle the tail/remainder 1754 /// iterations when the trip count is unknown or is not divisible by the VF, 1755 /// or as a peel-loop to handle gaps in interleave-groups. 1756 /// Under optsize and when the trip count is very small we don't allow any 1757 /// iterations to execute in the scalar loop. 1758 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 1759 1760 /// All blocks of the loop are to be masked to fold the tail of the scalar 1761 /// iterations. 1762 bool FoldTailByMasking = false; 1763 /// A map holding scalar costs for different vectorization factors. The 1764 /// presence of a cost for an instruction in the mapping indicates that the 1765 /// instruction will be scalarized when vectorizing with the associated 1766 /// vectorization factor. The entries are VF-ScalarCostTy pairs. 1767 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize; 1768 1769 /// Holds the instructions known to be uniform after vectorization. 1770 /// The data is collected per VF. 1771 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms; 1772 1773 /// Holds the instructions known to be scalar after vectorization. 1774 /// The data is collected per VF. 1775 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars; 1776 1777 /// Holds the instructions (address computations) that are forced to be 1778 /// scalarized. 1779 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1780 1781 /// PHINodes of the reductions that should be expanded in-loop along with 1782 /// their associated chains of reduction operations, in program order from top 1783 /// (PHI) to bottom. 1784 ReductionChainMap InLoopReductionChains; 1785 1786 /// A Map of inloop reduction operations and their immediate chain operand. 1787 /// FIXME: This can be removed once reductions can be costed correctly in 1788 /// vplan. This was added to allow quick lookup of the inloop operations, 1789 /// without having to loop through InLoopReductionChains. 1790 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1791 1792 /// Returns the expected difference in cost from scalarizing the expression 1793 /// feeding a predicated instruction \p PredInst. The instructions to 1794 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1795 /// non-negative return value implies the expression will be scalarized. 1796 /// Currently, only single-use chains are considered for scalarization.
1797 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1798 ElementCount VF); 1799 1800 /// Collect the instructions that are uniform after vectorization. An 1801 /// instruction is uniform if we represent it with a single scalar value in 1802 /// the vectorized loop corresponding to each vector iteration. Examples of 1803 /// uniform instructions include pointer operands of consecutive or 1804 /// interleaved memory accesses. Note that although uniformity implies an 1805 /// instruction will be scalar, the reverse is not true. In general, a 1806 /// scalarized instruction will be represented by VF scalar values in the 1807 /// vectorized loop, each corresponding to an iteration of the original 1808 /// scalar loop. 1809 void collectLoopUniforms(ElementCount VF); 1810 1811 /// Collect the instructions that are scalar after vectorization. An 1812 /// instruction is scalar if it is known to be uniform or will be scalarized 1813 /// during vectorization. Non-uniform scalarized instructions will be 1814 /// represented by VF values in the vectorized loop, each corresponding to an 1815 /// iteration of the original scalar loop. 1816 void collectLoopScalars(ElementCount VF); 1817 1818 /// Keeps cost model vectorization decision and cost for instructions. 1819 /// Right now it is used for memory instructions only. 1820 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1821 std::pair<InstWidening, InstructionCost>>; 1822 1823 DecisionList WideningDecisions; 1824 1825 /// Returns true if \p V is expected to be vectorized and it needs to be 1826 /// extracted. 1827 bool needsExtract(Value *V, ElementCount VF) const { 1828 Instruction *I = dyn_cast<Instruction>(V); 1829 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1830 TheLoop->isLoopInvariant(I)) 1831 return false; 1832 1833 // Assume we can vectorize V (and hence we need extraction) if the 1834 // scalars are not computed yet. This can happen, because it is called 1835 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1836 // the scalars are collected. That should be a safe assumption in most 1837 // cases, because we check if the operands have vectorizable types 1838 // beforehand in LoopVectorizationLegality. 1839 return Scalars.find(VF) == Scalars.end() || 1840 !isScalarAfterVectorization(I, VF); 1841 }; 1842 1843 /// Returns a range containing only operands needing to be extracted. 1844 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1845 ElementCount VF) const { 1846 return SmallVector<Value *, 4>(make_filter_range( 1847 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1848 } 1849 1850 /// Determines if we have the infrastructure to vectorize loop \p L and its 1851 /// epilogue, assuming the main loop is vectorized by \p VF. 1852 bool isCandidateForEpilogueVectorization(const Loop &L, 1853 const ElementCount VF) const; 1854 1855 /// Returns true if epilogue vectorization is considered profitable, and 1856 /// false otherwise. 1857 /// \p VF is the vectorization factor chosen for the original loop. 1858 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1859 1860 public: 1861 /// The loop that we evaluate. 1862 Loop *TheLoop; 1863 1864 /// Predicated scalar evolution analysis. 1865 PredicatedScalarEvolution &PSE; 1866 1867 /// Loop Info analysis. 1868 LoopInfo *LI; 1869 1870 /// Vectorization legality. 1871 LoopVectorizationLegality *Legal; 1872 1873 /// Vector target information. 
1874 const TargetTransformInfo &TTI; 1875 1876 /// Target Library Info. 1877 const TargetLibraryInfo *TLI; 1878 1879 /// Demanded bits analysis. 1880 DemandedBits *DB; 1881 1882 /// Assumption cache. 1883 AssumptionCache *AC; 1884 1885 /// Interface to emit optimization remarks. 1886 OptimizationRemarkEmitter *ORE; 1887 1888 const Function *TheFunction; 1889 1890 /// Loop Vectorize Hint. 1891 const LoopVectorizeHints *Hints; 1892 1893 /// The interleave access information contains groups of interleaved accesses 1894 /// that have the same stride and are close to each other. 1895 InterleavedAccessInfo &InterleaveInfo; 1896 1897 /// Values to ignore in the cost model. 1898 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1899 1900 /// Values to ignore in the cost model when VF > 1. 1901 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1902 1903 /// All element types found in the loop. 1904 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1905 1906 /// Profitable vector factors. 1907 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1908 }; 1909 } // end namespace llvm 1910 1911 /// Helper struct to manage generating runtime checks for vectorization. 1912 /// 1913 /// The runtime checks are created up-front in temporary blocks to allow better 1914 /// estimating of their cost, and are un-linked from the existing IR. After deciding to 1915 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1916 /// temporary blocks are completely removed. 1917 class GeneratedRTChecks { 1918 /// Basic block which contains the generated SCEV checks, if any. 1919 BasicBlock *SCEVCheckBlock = nullptr; 1920 1921 /// The value representing the result of the generated SCEV checks. If it is 1922 /// nullptr, either no SCEV checks have been generated or they have been used. 1923 Value *SCEVCheckCond = nullptr; 1924 1925 /// Basic block which contains the generated memory runtime checks, if any. 1926 BasicBlock *MemCheckBlock = nullptr; 1927 1928 /// The value representing the result of the generated memory runtime checks. 1929 /// If it is nullptr, either no memory runtime checks have been generated or 1930 /// they have been used. 1931 Instruction *MemRuntimeCheckCond = nullptr; 1932 1933 DominatorTree *DT; 1934 LoopInfo *LI; 1935 1936 SCEVExpander SCEVExp; 1937 SCEVExpander MemCheckExp; 1938 1939 public: 1940 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI, 1941 const DataLayout &DL) 1942 : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"), 1943 MemCheckExp(SE, DL, "scev.check") {} 1944 1945 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can 1946 /// accurately estimate the cost of the runtime checks. The blocks are 1947 /// un-linked from the IR and are added back during vector code generation. If 1948 /// there is no vector code generation, the check blocks are removed 1949 /// completely. 1950 void Create(Loop *L, const LoopAccessInfo &LAI, 1951 const SCEVUnionPredicate &UnionPred) { 1952 1953 BasicBlock *LoopHeader = L->getHeader(); 1954 BasicBlock *Preheader = L->getLoopPreheader(); 1955 1956 // Use SplitBlock to create blocks for SCEV & memory runtime checks to 1957 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those 1958 // may be used by SCEVExpander. The blocks will be un-linked from their 1959 // predecessors and removed from LI & DT at the end of the function.
1960 if (!UnionPred.isAlwaysTrue()) { 1961 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI, 1962 nullptr, "vector.scevcheck"); 1963 1964 SCEVCheckCond = SCEVExp.expandCodeForPredicate( 1965 &UnionPred, SCEVCheckBlock->getTerminator()); 1966 } 1967 1968 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking(); 1969 if (RtPtrChecking.Need) { 1970 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader; 1971 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr, 1972 "vector.memcheck"); 1973 1974 std::tie(std::ignore, MemRuntimeCheckCond) = 1975 addRuntimeChecks(MemCheckBlock->getTerminator(), L, 1976 RtPtrChecking.getChecks(), MemCheckExp); 1977 assert(MemRuntimeCheckCond && 1978 "no RT checks generated although RtPtrChecking " 1979 "claimed checks are required"); 1980 } 1981 1982 if (!MemCheckBlock && !SCEVCheckBlock) 1983 return; 1984 1985 // Unhook the temporary block with the checks, update various places 1986 // accordingly. 1987 if (SCEVCheckBlock) 1988 SCEVCheckBlock->replaceAllUsesWith(Preheader); 1989 if (MemCheckBlock) 1990 MemCheckBlock->replaceAllUsesWith(Preheader); 1991 1992 if (SCEVCheckBlock) { 1993 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1994 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 1995 Preheader->getTerminator()->eraseFromParent(); 1996 } 1997 if (MemCheckBlock) { 1998 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1999 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 2000 Preheader->getTerminator()->eraseFromParent(); 2001 } 2002 2003 DT->changeImmediateDominator(LoopHeader, Preheader); 2004 if (MemCheckBlock) { 2005 DT->eraseNode(MemCheckBlock); 2006 LI->removeBlock(MemCheckBlock); 2007 } 2008 if (SCEVCheckBlock) { 2009 DT->eraseNode(SCEVCheckBlock); 2010 LI->removeBlock(SCEVCheckBlock); 2011 } 2012 } 2013 2014 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2015 /// unused. 2016 ~GeneratedRTChecks() { 2017 SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT); 2018 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT); 2019 if (!SCEVCheckCond) 2020 SCEVCleaner.markResultUsed(); 2021 2022 if (!MemRuntimeCheckCond) 2023 MemCheckCleaner.markResultUsed(); 2024 2025 if (MemRuntimeCheckCond) { 2026 auto &SE = *MemCheckExp.getSE(); 2027 // Memory runtime check generation creates compares that use expanded 2028 // values. Remove them before running the SCEVExpanderCleaners. 2029 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2030 if (MemCheckExp.isInsertedInstruction(&I)) 2031 continue; 2032 SE.forgetValue(&I); 2033 SE.eraseValueFromMap(&I); 2034 I.eraseFromParent(); 2035 } 2036 } 2037 MemCheckCleaner.cleanup(); 2038 SCEVCleaner.cleanup(); 2039 2040 if (SCEVCheckCond) 2041 SCEVCheckBlock->eraseFromParent(); 2042 if (MemRuntimeCheckCond) 2043 MemCheckBlock->eraseFromParent(); 2044 } 2045 2046 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2047 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2048 /// depending on the generated condition. 
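/// Returns the SCEV check block, or nullptr if no SCEV checks were generated or the check condition folds to false (i.e. the checks trivially pass).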
2049 BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass, 2050 BasicBlock *LoopVectorPreHeader, 2051 BasicBlock *LoopExitBlock) { 2052 if (!SCEVCheckCond) 2053 return nullptr; 2054 if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond)) 2055 if (C->isZero()) 2056 return nullptr; 2057 2058 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2059 2060 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2061 // Create new preheader for vector loop. 2062 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2063 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2064 2065 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2066 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2067 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2068 SCEVCheckBlock); 2069 2070 DT->addNewBlock(SCEVCheckBlock, Pred); 2071 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2072 2073 ReplaceInstWithInst( 2074 SCEVCheckBlock->getTerminator(), 2075 BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond)); 2076 // Mark the check as used, to prevent it from being removed during cleanup. 2077 SCEVCheckCond = nullptr; 2078 return SCEVCheckBlock; 2079 } 2080 2081 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2082 /// the branches to branch to the vector preheader or \p Bypass, depending on 2083 /// the generated condition. 2084 BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass, 2085 BasicBlock *LoopVectorPreHeader) { 2086 // Check if we generated code that checks in runtime if arrays overlap. 2087 if (!MemRuntimeCheckCond) 2088 return nullptr; 2089 2090 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2091 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2092 MemCheckBlock); 2093 2094 DT->addNewBlock(MemCheckBlock, Pred); 2095 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2096 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2097 2098 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2099 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2100 2101 ReplaceInstWithInst( 2102 MemCheckBlock->getTerminator(), 2103 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2104 MemCheckBlock->getTerminator()->setDebugLoc( 2105 Pred->getTerminator()->getDebugLoc()); 2106 2107 // Mark the check as used, to prevent it from being removed during cleanup. 2108 MemRuntimeCheckCond = nullptr; 2109 return MemCheckBlock; 2110 } 2111 }; 2112 2113 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2114 // vectorization. The loop needs to be annotated with #pragma omp simd 2115 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2116 // vector length information is not provided, vectorization is not considered 2117 // explicit. Interleave hints are not allowed either. These limitations will be 2118 // relaxed in the future. 2119 // Please, note that we are currently forced to abuse the pragma 'clang 2120 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2121 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2122 // provides *explicit vectorization hints* (LV can bypass legal checks and 2123 // assume that vectorization is legal). However, both hints are implemented 2124 // using the same metadata (llvm.loop.vectorize, processed by 2125 // LoopVectorizeHints). This will be fixed in the future when the native IR 2126 // representation for pragma 'omp simd' is introduced. 
2127 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2128 OptimizationRemarkEmitter *ORE) { 2129 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2130 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2131 2132 // Only outer loops with an explicit vectorization hint are supported. 2133 // Unannotated outer loops are ignored. 2134 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2135 return false; 2136 2137 Function *Fn = OuterLp->getHeader()->getParent(); 2138 if (!Hints.allowVectorization(Fn, OuterLp, 2139 true /*VectorizeOnlyWhenForced*/)) { 2140 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2141 return false; 2142 } 2143 2144 if (Hints.getInterleave() > 1) { 2145 // TODO: Interleave support is future work. 2146 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2147 "outer loops.\n"); 2148 Hints.emitRemarkWithHints(); 2149 return false; 2150 } 2151 2152 return true; 2153 } 2154 2155 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2156 OptimizationRemarkEmitter *ORE, 2157 SmallVectorImpl<Loop *> &V) { 2158 // Collect inner loops and outer loops without irreducible control flow. For 2159 // now, only collect outer loops that have explicit vectorization hints. If we 2160 // are stress testing the VPlan H-CFG construction, we collect the outermost 2161 // loop of every loop nest. 2162 if (L.isInnermost() || VPlanBuildStressTest || 2163 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2164 LoopBlocksRPO RPOT(&L); 2165 RPOT.perform(LI); 2166 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2167 V.push_back(&L); 2168 // TODO: Collect inner loops inside marked outer loops in case 2169 // vectorization fails for the outer loop. Do not invoke 2170 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2171 // already known to be reducible. We can use an inherited attribute for 2172 // that. 2173 return; 2174 } 2175 } 2176 for (Loop *InnerL : L) 2177 collectSupportedLoops(*InnerL, LI, ORE, V); 2178 } 2179 2180 namespace { 2181 2182 /// The LoopVectorize Pass. 2183 struct LoopVectorize : public FunctionPass { 2184 /// Pass identification, replacement for typeid 2185 static char ID; 2186 2187 LoopVectorizePass Impl; 2188 2189 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2190 bool VectorizeOnlyWhenForced = false) 2191 : FunctionPass(ID), 2192 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2193 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2194 } 2195 2196 bool runOnFunction(Function &F) override { 2197 if (skipFunction(F)) 2198 return false; 2199 2200 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2201 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2202 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2203 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2204 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2205 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2206 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 2207 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2208 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2209 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2210 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2211 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2212 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2213 2214 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2215 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2216 2217 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2218 GetLAA, *ORE, PSI).MadeAnyChange; 2219 } 2220 2221 void getAnalysisUsage(AnalysisUsage &AU) const override { 2222 AU.addRequired<AssumptionCacheTracker>(); 2223 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2224 AU.addRequired<DominatorTreeWrapperPass>(); 2225 AU.addRequired<LoopInfoWrapperPass>(); 2226 AU.addRequired<ScalarEvolutionWrapperPass>(); 2227 AU.addRequired<TargetTransformInfoWrapperPass>(); 2228 AU.addRequired<AAResultsWrapperPass>(); 2229 AU.addRequired<LoopAccessLegacyAnalysis>(); 2230 AU.addRequired<DemandedBitsWrapperPass>(); 2231 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2232 AU.addRequired<InjectTLIMappingsLegacy>(); 2233 2234 // We currently do not preserve loopinfo/dominator analyses with outer loop 2235 // vectorization. Until this is addressed, mark these analyses as preserved 2236 // only for non-VPlan-native path. 2237 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2238 if (!EnableVPlanNativePath) { 2239 AU.addPreserved<LoopInfoWrapperPass>(); 2240 AU.addPreserved<DominatorTreeWrapperPass>(); 2241 } 2242 2243 AU.addPreserved<BasicAAWrapperPass>(); 2244 AU.addPreserved<GlobalsAAWrapperPass>(); 2245 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2246 } 2247 }; 2248 2249 } // end anonymous namespace 2250 2251 //===----------------------------------------------------------------------===// 2252 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2253 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2254 //===----------------------------------------------------------------------===// 2255 2256 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2257 // We need to place the broadcast of invariant variables outside the loop, 2258 // but only if it's proven safe to do so. Else, broadcast will be inside 2259 // vector loop body. 2260 Instruction *Instr = dyn_cast<Instruction>(V); 2261 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2262 (!Instr || 2263 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2264 // Place the code for broadcasting invariant variables in the new preheader. 2265 IRBuilder<>::InsertPointGuard Guard(Builder); 2266 if (SafeToHoist) 2267 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2268 2269 // Broadcast the scalar into all locations in the vector. 
2270 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2271 2272 return Shuf; 2273 } 2274 2275 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2276 const InductionDescriptor &II, Value *Step, Value *Start, 2277 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2278 VPTransformState &State) { 2279 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2280 "Expected either an induction phi-node or a truncate of it!"); 2281 2282 // Construct the initial value of the vector IV in the vector loop preheader 2283 auto CurrIP = Builder.saveIP(); 2284 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2285 if (isa<TruncInst>(EntryVal)) { 2286 assert(Start->getType()->isIntegerTy() && 2287 "Truncation requires an integer type"); 2288 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2289 Step = Builder.CreateTrunc(Step, TruncType); 2290 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2291 } 2292 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2293 Value *SteppedStart = 2294 getStepVector(SplatStart, 0, Step, II.getInductionOpcode()); 2295 2296 // We create vector phi nodes for both integer and floating-point induction 2297 // variables. Here, we determine the kind of arithmetic we will perform. 2298 Instruction::BinaryOps AddOp; 2299 Instruction::BinaryOps MulOp; 2300 if (Step->getType()->isIntegerTy()) { 2301 AddOp = Instruction::Add; 2302 MulOp = Instruction::Mul; 2303 } else { 2304 AddOp = II.getInductionOpcode(); 2305 MulOp = Instruction::FMul; 2306 } 2307 2308 // Multiply the vectorization factor by the step using integer or 2309 // floating-point arithmetic as appropriate. 2310 Type *StepType = Step->getType(); 2311 if (Step->getType()->isFloatingPointTy()) 2312 StepType = IntegerType::get(StepType->getContext(), 2313 StepType->getScalarSizeInBits()); 2314 Value *RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2315 if (Step->getType()->isFloatingPointTy()) 2316 RuntimeVF = Builder.CreateSIToFP(RuntimeVF, Step->getType()); 2317 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2318 2319 // Create a vector splat to use in the induction update. 2320 // 2321 // FIXME: If the step is non-constant, we create the vector splat with 2322 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2323 // handle a constant vector splat. 2324 Value *SplatVF = isa<Constant>(Mul) 2325 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2326 : Builder.CreateVectorSplat(VF, Mul); 2327 Builder.restoreIP(CurrIP); 2328 2329 // We may need to add the step a number of times, depending on the unroll 2330 // factor. The last of those goes into the PHI. 2331 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 2332 &*LoopVectorBody->getFirstInsertionPt()); 2333 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 2334 Instruction *LastInduction = VecInd; 2335 for (unsigned Part = 0; Part < UF; ++Part) { 2336 State.set(Def, LastInduction, Part); 2337 2338 if (isa<TruncInst>(EntryVal)) 2339 addMetadata(LastInduction, EntryVal); 2340 recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef, 2341 State, Part); 2342 2343 LastInduction = cast<Instruction>( 2344 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 2345 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 2346 } 2347 2348 // Move the last step to the end of the latch block. This ensures consistent 2349 // placement of all induction updates. 
2350 auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 2351 auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator()); 2352 auto *ICmp = cast<Instruction>(Br->getCondition()); 2353 LastInduction->moveBefore(ICmp); 2354 LastInduction->setName("vec.ind.next"); 2355 2356 VecInd->addIncoming(SteppedStart, LoopVectorPreHeader); 2357 VecInd->addIncoming(LastInduction, LoopVectorLatch); 2358 } 2359 2360 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const { 2361 return Cost->isScalarAfterVectorization(I, VF) || 2362 Cost->isProfitableToScalarize(I, VF); 2363 } 2364 2365 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const { 2366 if (shouldScalarizeInstruction(IV)) 2367 return true; 2368 auto isScalarInst = [&](User *U) -> bool { 2369 auto *I = cast<Instruction>(U); 2370 return (OrigLoop->contains(I) && shouldScalarizeInstruction(I)); 2371 }; 2372 return llvm::any_of(IV->users(), isScalarInst); 2373 } 2374 2375 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast( 2376 const InductionDescriptor &ID, const Instruction *EntryVal, 2377 Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State, 2378 unsigned Part, unsigned Lane) { 2379 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2380 "Expected either an induction phi-node or a truncate of it!"); 2381 2382 // This induction variable is not the phi from the original loop but the 2383 // newly-created IV based on the proof that casted Phi is equal to the 2384 // uncasted Phi in the vectorized loop (under a runtime guard possibly). It 2385 // re-uses the same InductionDescriptor that original IV uses but we don't 2386 // have to do any recording in this case - that is done when original IV is 2387 // processed. 2388 if (isa<TruncInst>(EntryVal)) 2389 return; 2390 2391 const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts(); 2392 if (Casts.empty()) 2393 return; 2394 // Only the first Cast instruction in the Casts vector is of interest. 2395 // The rest of the Casts (if exist) have no uses outside the 2396 // induction update chain itself. 2397 if (Lane < UINT_MAX) 2398 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2399 else 2400 State.set(CastDef, VectorLoopVal, Part); 2401 } 2402 2403 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2404 TruncInst *Trunc, VPValue *Def, 2405 VPValue *CastDef, 2406 VPTransformState &State) { 2407 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2408 "Primary induction variable must have an integer type"); 2409 2410 auto II = Legal->getInductionVars().find(IV); 2411 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2412 2413 auto ID = II->second; 2414 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2415 2416 // The value from the original loop to which we are mapping the new induction 2417 // variable. 2418 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2419 2420 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2421 2422 // Generate code for the induction step. 
Note that induction steps are 2423 // required to be loop-invariant 2424 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2425 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2426 "Induction step should be loop invariant"); 2427 if (PSE.getSE()->isSCEVable(IV->getType())) { 2428 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2429 return Exp.expandCodeFor(Step, Step->getType(), 2430 LoopVectorPreHeader->getTerminator()); 2431 } 2432 return cast<SCEVUnknown>(Step)->getValue(); 2433 }; 2434 2435 // The scalar value to broadcast. This is derived from the canonical 2436 // induction variable. If a truncation type is given, truncate the canonical 2437 // induction variable and step. Otherwise, derive these values from the 2438 // induction descriptor. 2439 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2440 Value *ScalarIV = Induction; 2441 if (IV != OldInduction) { 2442 ScalarIV = IV->getType()->isIntegerTy() 2443 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2444 : Builder.CreateCast(Instruction::SIToFP, Induction, 2445 IV->getType()); 2446 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2447 ScalarIV->setName("offset.idx"); 2448 } 2449 if (Trunc) { 2450 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2451 assert(Step->getType()->isIntegerTy() && 2452 "Truncation requires an integer step"); 2453 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2454 Step = Builder.CreateTrunc(Step, TruncType); 2455 } 2456 return ScalarIV; 2457 }; 2458 2459 // Create the vector values from the scalar IV, in the absence of creating a 2460 // vector IV. 2461 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2462 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2463 for (unsigned Part = 0; Part < UF; ++Part) { 2464 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2465 Value *EntryPart = 2466 getStepVector(Broadcasted, VF.getKnownMinValue() * Part, Step, 2467 ID.getInductionOpcode()); 2468 State.set(Def, EntryPart, Part); 2469 if (Trunc) 2470 addMetadata(EntryPart, Trunc); 2471 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2472 State, Part); 2473 } 2474 }; 2475 2476 // Fast-math-flags propagate from the original induction instruction. 2477 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 2478 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 2479 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 2480 2481 // Now do the actual transformations, and start with creating the step value. 2482 Value *Step = CreateStepValue(ID.getStep()); 2483 if (VF.isZero() || VF.isScalar()) { 2484 Value *ScalarIV = CreateScalarIV(Step); 2485 CreateSplatIV(ScalarIV, Step); 2486 return; 2487 } 2488 2489 // Determine if we want a scalar version of the induction variable. This is 2490 // true if the induction variable itself is not widened, or if it has at 2491 // least one user in the loop that is not widened. 2492 auto NeedsScalarIV = needsScalarInduction(EntryVal); 2493 if (!NeedsScalarIV) { 2494 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2495 State); 2496 return; 2497 } 2498 2499 // Try to create a new independent vector induction variable. If we can't 2500 // create the phi node, we will splat the scalar induction variable in each 2501 // loop iteration. 
2502 if (!shouldScalarizeInstruction(EntryVal)) { 2503 createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef, 2504 State); 2505 Value *ScalarIV = CreateScalarIV(Step); 2506 // Create scalar steps that can be used by instructions we will later 2507 // scalarize. Note that the addition of the scalar steps will not increase 2508 // the number of instructions in the loop in the common case prior to 2509 // InstCombine. We will be trading one vector extract for each scalar step. 2510 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2511 return; 2512 } 2513 2514 // All IV users are scalar instructions, so only emit a scalar IV, not a 2515 // vectorised IV. Except when we tail-fold, then the splat IV feeds the 2516 // predicate used by the masked loads/stores. 2517 Value *ScalarIV = CreateScalarIV(Step); 2518 if (!Cost->isScalarEpilogueAllowed()) 2519 CreateSplatIV(ScalarIV, Step); 2520 buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State); 2521 } 2522 2523 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step, 2524 Instruction::BinaryOps BinOp) { 2525 // Create and check the types. 2526 auto *ValVTy = cast<VectorType>(Val->getType()); 2527 ElementCount VLen = ValVTy->getElementCount(); 2528 2529 Type *STy = Val->getType()->getScalarType(); 2530 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2531 "Induction Step must be an integer or FP"); 2532 assert(Step->getType() == STy && "Step has wrong type"); 2533 2534 SmallVector<Constant *, 8> Indices; 2535 2536 // Create a vector of consecutive numbers from zero to VF. 2537 VectorType *InitVecValVTy = ValVTy; 2538 Type *InitVecValSTy = STy; 2539 if (STy->isFloatingPointTy()) { 2540 InitVecValSTy = 2541 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2542 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2543 } 2544 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2545 2546 // Add on StartIdx 2547 Value *StartIdxSplat = Builder.CreateVectorSplat( 2548 VLen, ConstantInt::get(InitVecValSTy, StartIdx)); 2549 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2550 2551 if (STy->isIntegerTy()) { 2552 Step = Builder.CreateVectorSplat(VLen, Step); 2553 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2554 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 2555 // which can be found from the original scalar operations. 2556 Step = Builder.CreateMul(InitVec, Step); 2557 return Builder.CreateAdd(Val, Step, "induction"); 2558 } 2559 2560 // Floating point induction. 2561 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2562 "Binary Opcode should be specified for FP induction"); 2563 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2564 Step = Builder.CreateVectorSplat(VLen, Step); 2565 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2566 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2567 } 2568 2569 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2570 Instruction *EntryVal, 2571 const InductionDescriptor &ID, 2572 VPValue *Def, VPValue *CastDef, 2573 VPTransformState &State) { 2574 // We shouldn't have to build scalar steps if we aren't vectorizing. 2575 assert(VF.isVector() && "VF should be greater than one"); 2576 // Get the value type and ensure it and the step have the same integer type. 
2577 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2578 assert(ScalarIVTy == Step->getType() && 2579 "Val and Step should have the same type"); 2580 2581 // We build scalar steps for both integer and floating-point induction 2582 // variables. Here, we determine the kind of arithmetic we will perform. 2583 Instruction::BinaryOps AddOp; 2584 Instruction::BinaryOps MulOp; 2585 if (ScalarIVTy->isIntegerTy()) { 2586 AddOp = Instruction::Add; 2587 MulOp = Instruction::Mul; 2588 } else { 2589 AddOp = ID.getInductionOpcode(); 2590 MulOp = Instruction::FMul; 2591 } 2592 2593 // Determine the number of scalars we need to generate for each unroll 2594 // iteration. If EntryVal is uniform, we only need to generate the first 2595 // lane. Otherwise, we generate all VF values. 2596 bool IsUniform = 2597 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2598 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2599 // Compute the scalar steps and save the results in State. 2600 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2601 ScalarIVTy->getScalarSizeInBits()); 2602 Type *VecIVTy = nullptr; 2603 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2604 if (!IsUniform && VF.isScalable()) { 2605 VecIVTy = VectorType::get(ScalarIVTy, VF); 2606 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2607 SplatStep = Builder.CreateVectorSplat(VF, Step); 2608 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2609 } 2610 2611 for (unsigned Part = 0; Part < UF; ++Part) { 2612 Value *StartIdx0 = 2613 createStepForVF(Builder, ConstantInt::get(IntStepTy, Part), VF); 2614 2615 if (!IsUniform && VF.isScalable()) { 2616 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2617 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2618 if (ScalarIVTy->isFloatingPointTy()) 2619 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2620 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2621 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2622 State.set(Def, Add, Part); 2623 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2624 Part); 2625 // It's useful to record the lane values too for the known minimum number 2626 // of elements so we do those below. This improves the code quality when 2627 // trying to extract the first element, for example. 2628 } 2629 2630 if (ScalarIVTy->isFloatingPointTy()) 2631 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2632 2633 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2634 Value *StartIdx = Builder.CreateBinOp( 2635 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2636 // The step returned by `createStepForVF` is a runtime-evaluated value 2637 // when VF is scalable. Otherwise, it should be folded into a Constant. 
2638 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2639 "Expected StartIdx to be folded to a constant when VF is not " 2640 "scalable"); 2641 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2642 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2643 State.set(Def, Add, VPIteration(Part, Lane)); 2644 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2645 Part, Lane); 2646 } 2647 } 2648 } 2649 2650 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2651 const VPIteration &Instance, 2652 VPTransformState &State) { 2653 Value *ScalarInst = State.get(Def, Instance); 2654 Value *VectorValue = State.get(Def, Instance.Part); 2655 VectorValue = Builder.CreateInsertElement( 2656 VectorValue, ScalarInst, 2657 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2658 State.set(Def, VectorValue, Instance.Part); 2659 } 2660 2661 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2662 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2663 return Builder.CreateVectorReverse(Vec, "reverse"); 2664 } 2665 2666 // Return whether we allow using masked interleave-groups (for dealing with 2667 // strided loads/stores that reside in predicated blocks, or for dealing 2668 // with gaps). 2669 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2670 // If an override option has been passed in for interleaved accesses, use it. 2671 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2672 return EnableMaskedInterleavedMemAccesses; 2673 2674 return TTI.enableMaskedInterleavedAccessVectorization(); 2675 } 2676 2677 // Try to vectorize the interleave group that \p Instr belongs to. 2678 // 2679 // E.g. Translate following interleaved load group (factor = 3): 2680 // for (i = 0; i < N; i+=3) { 2681 // R = Pic[i]; // Member of index 0 2682 // G = Pic[i+1]; // Member of index 1 2683 // B = Pic[i+2]; // Member of index 2 2684 // ... // do something to R, G, B 2685 // } 2686 // To: 2687 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2688 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2689 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2690 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2691 // 2692 // Or translate following interleaved store group (factor = 3): 2693 // for (i = 0; i < N; i+=3) { 2694 // ... do something to R, G, B 2695 // Pic[i] = R; // Member of index 0 2696 // Pic[i+1] = G; // Member of index 1 2697 // Pic[i+2] = B; // Member of index 2 2698 // } 2699 // To: 2700 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2701 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2702 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2703 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2704 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2705 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2706 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2707 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2708 VPValue *BlockInMask) { 2709 Instruction *Instr = Group->getInsertPos(); 2710 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2711 2712 // Prepare for the vector type of the interleaved load/store. 
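  // Illustrative sketch: for the factor-3 i32 group in the example above and
  // a fixed VF of 4, the wide vector type computed below is <12 x i32>.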
2713 Type *ScalarTy = getLoadStoreType(Instr); 2714 unsigned InterleaveFactor = Group->getFactor(); 2715 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2716 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2717 2718 // Prepare for the new pointers. 2719 SmallVector<Value *, 2> AddrParts; 2720 unsigned Index = Group->getIndex(Instr); 2721 2722 // TODO: extend the masked interleaved-group support to reversed access. 2723 assert((!BlockInMask || !Group->isReverse()) && 2724 "Reversed masked interleave-group not supported."); 2725 2726 // If the group is reverse, adjust the index to refer to the last vector lane 2727 // instead of the first. We adjust the index from the first vector lane, 2728 // rather than directly getting the pointer for lane VF - 1, because the 2729 // pointer operand of the interleaved access is supposed to be uniform. For 2730 // uniform instructions, we're only required to generate a value for the 2731 // first vector lane in each unroll iteration. 2732 if (Group->isReverse()) 2733 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2734 2735 for (unsigned Part = 0; Part < UF; Part++) { 2736 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2737 setDebugLocFromInst(AddrPart); 2738 2739 // Notice current instruction could be any index. Need to adjust the address 2740 // to the member of index 0. 2741 // 2742 // E.g. a = A[i+1]; // Member of index 1 (Current instruction) 2743 // b = A[i]; // Member of index 0 2744 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2745 // 2746 // E.g. A[i+1] = a; // Member of index 1 2747 // A[i] = b; // Member of index 0 2748 // A[i+2] = c; // Member of index 2 (Current instruction) 2749 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2750 2751 bool InBounds = false; 2752 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2753 InBounds = gep->isInBounds(); 2754 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2755 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2756 2757 // Cast to the vector pointer type. 2758 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2759 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2760 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2761 } 2762 2763 setDebugLocFromInst(Instr); 2764 Value *PoisonVec = PoisonValue::get(VecTy); 2765 2766 Value *MaskForGaps = nullptr; 2767 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2768 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2769 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2770 } 2771 2772 // Vectorize the interleaved load group. 2773 if (isa<LoadInst>(Instr)) { 2774 // For each unroll part, create a wide load for the group. 2775 SmallVector<Value *, 2> NewLoads; 2776 for (unsigned Part = 0; Part < UF; Part++) { 2777 Instruction *NewLoad; 2778 if (BlockInMask || MaskForGaps) { 2779 assert(useMaskedInterleavedAccesses(*TTI) && 2780 "masked interleaved groups are not allowed."); 2781 Value *GroupMask = MaskForGaps; 2782 if (BlockInMask) { 2783 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2784 Value *ShuffledMask = Builder.CreateShuffleVector( 2785 BlockInMaskPart, 2786 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2787 "interleaved.mask"); 2788 GroupMask = MaskForGaps 2789 ? 
Builder.CreateBinOp(Instruction::And, ShuffledMask, 2790 MaskForGaps) 2791 : ShuffledMask; 2792 } 2793 NewLoad = 2794 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2795 GroupMask, PoisonVec, "wide.masked.vec"); 2796 } 2797 else 2798 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2799 Group->getAlign(), "wide.vec"); 2800 Group->addMetadata(NewLoad); 2801 NewLoads.push_back(NewLoad); 2802 } 2803 2804 // For each member in the group, shuffle out the appropriate data from the 2805 // wide loads. 2806 unsigned J = 0; 2807 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2808 Instruction *Member = Group->getMember(I); 2809 2810 // Skip the gaps in the group. 2811 if (!Member) 2812 continue; 2813 2814 auto StrideMask = 2815 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2816 for (unsigned Part = 0; Part < UF; Part++) { 2817 Value *StridedVec = Builder.CreateShuffleVector( 2818 NewLoads[Part], StrideMask, "strided.vec"); 2819 2820 // If this member has different type, cast the result type. 2821 if (Member->getType() != ScalarTy) { 2822 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2823 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2824 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2825 } 2826 2827 if (Group->isReverse()) 2828 StridedVec = reverseVector(StridedVec); 2829 2830 State.set(VPDefs[J], StridedVec, Part); 2831 } 2832 ++J; 2833 } 2834 return; 2835 } 2836 2837 // The sub vector type for current instruction. 2838 auto *SubVT = VectorType::get(ScalarTy, VF); 2839 2840 // Vectorize the interleaved store group. 2841 for (unsigned Part = 0; Part < UF; Part++) { 2842 // Collect the stored vector from each member. 2843 SmallVector<Value *, 4> StoredVecs; 2844 for (unsigned i = 0; i < InterleaveFactor; i++) { 2845 // Interleaved store group doesn't allow a gap, so each index has a member 2846 assert(Group->getMember(i) && "Fail to get a member from an interleaved store group"); 2847 2848 Value *StoredVec = State.get(StoredValues[i], Part); 2849 2850 if (Group->isReverse()) 2851 StoredVec = reverseVector(StoredVec); 2852 2853 // If this member has different type, cast it to a unified type. 2854 2855 if (StoredVec->getType() != SubVT) 2856 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2857 2858 StoredVecs.push_back(StoredVec); 2859 } 2860 2861 // Concatenate all vectors into a wide vector. 2862 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2863 2864 // Interleave the elements in the wide vector. 2865 Value *IVec = Builder.CreateShuffleVector( 2866 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2867 "interleaved.vec"); 2868 2869 Instruction *NewStoreInstr; 2870 if (BlockInMask) { 2871 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2872 Value *ShuffledMask = Builder.CreateShuffleVector( 2873 BlockInMaskPart, 2874 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2875 "interleaved.mask"); 2876 NewStoreInstr = Builder.CreateMaskedStore( 2877 IVec, AddrParts[Part], Group->getAlign(), ShuffledMask); 2878 } 2879 else 2880 NewStoreInstr = 2881 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2882 2883 Group->addMetadata(NewStoreInstr); 2884 } 2885 } 2886 2887 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2888 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2889 VPValue *StoredValue, VPValue *BlockInMask) { 2890 // Attempt to issue a wide load. 
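  // Rough sketch of the strategies below: a consecutive access becomes one
  // wide load/store per unroll part, a reverse-consecutive access additionally
  // reverses the value vector (and any mask), and a gather/scatter decision
  // emits llvm.masked.gather/llvm.masked.scatter driven by a vector of
  // pointers.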
2891 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2892 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2893 2894 assert((LI || SI) && "Invalid Load/Store instruction"); 2895 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2896 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2897 2898 LoopVectorizationCostModel::InstWidening Decision = 2899 Cost->getWideningDecision(Instr, VF); 2900 assert((Decision == LoopVectorizationCostModel::CM_Widen || 2901 Decision == LoopVectorizationCostModel::CM_Widen_Reverse || 2902 Decision == LoopVectorizationCostModel::CM_GatherScatter) && 2903 "CM decision is not to widen the memory instruction"); 2904 2905 Type *ScalarDataTy = getLoadStoreType(Instr); 2906 2907 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2908 const Align Alignment = getLoadStoreAlignment(Instr); 2909 2910 // Determine if the pointer operand of the access is either consecutive or 2911 // reverse consecutive. 2912 bool Reverse = (Decision == LoopVectorizationCostModel::CM_Widen_Reverse); 2913 bool ConsecutiveStride = 2914 Reverse || (Decision == LoopVectorizationCostModel::CM_Widen); 2915 bool CreateGatherScatter = 2916 (Decision == LoopVectorizationCostModel::CM_GatherScatter); 2917 2918 // Either Ptr feeds a vector load/store, or a vector GEP should feed a vector 2919 // gather/scatter. Otherwise Decision should have been to Scalarize. 2920 assert((ConsecutiveStride || CreateGatherScatter) && 2921 "The instruction should be scalarized"); 2922 (void)ConsecutiveStride; 2923 2924 VectorParts BlockInMaskParts(UF); 2925 bool isMaskRequired = BlockInMask; 2926 if (isMaskRequired) 2927 for (unsigned Part = 0; Part < UF; ++Part) 2928 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2929 2930 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2931 // Calculate the pointer for the specific unroll-part. 2932 GetElementPtrInst *PartPtr = nullptr; 2933 2934 bool InBounds = false; 2935 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2936 InBounds = gep->isInBounds(); 2937 if (Reverse) { 2938 // If the address is consecutive but reversed, then the 2939 // wide store needs to start at the last vector element. 2940 // RunTimeVF = VScale * VF.getKnownMinValue() 2941 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2942 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2943 // NumElt = -Part * RunTimeVF 2944 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2945 // LastLane = 1 - RunTimeVF 2946 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2947 PartPtr = 2948 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2949 PartPtr->setIsInBounds(InBounds); 2950 PartPtr = cast<GetElementPtrInst>( 2951 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2952 PartPtr->setIsInBounds(InBounds); 2953 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
2954 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2955 } else { 2956 Value *Increment = createStepForVF(Builder, Builder.getInt32(Part), VF); 2957 PartPtr = cast<GetElementPtrInst>( 2958 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2959 PartPtr->setIsInBounds(InBounds); 2960 } 2961 2962 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2963 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2964 }; 2965 2966 // Handle Stores: 2967 if (SI) { 2968 setDebugLocFromInst(SI); 2969 2970 for (unsigned Part = 0; Part < UF; ++Part) { 2971 Instruction *NewSI = nullptr; 2972 Value *StoredVal = State.get(StoredValue, Part); 2973 if (CreateGatherScatter) { 2974 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2975 Value *VectorGep = State.get(Addr, Part); 2976 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2977 MaskPart); 2978 } else { 2979 if (Reverse) { 2980 // If we store to reverse consecutive memory locations, then we need 2981 // to reverse the order of elements in the stored value. 2982 StoredVal = reverseVector(StoredVal); 2983 // We don't want to update the value in the map as it might be used in 2984 // another expression. So don't call resetVectorValue(StoredVal). 2985 } 2986 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 2987 if (isMaskRequired) 2988 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 2989 BlockInMaskParts[Part]); 2990 else 2991 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 2992 } 2993 addMetadata(NewSI, SI); 2994 } 2995 return; 2996 } 2997 2998 // Handle loads. 2999 assert(LI && "Must have a load instruction"); 3000 setDebugLocFromInst(LI); 3001 for (unsigned Part = 0; Part < UF; ++Part) { 3002 Value *NewLI; 3003 if (CreateGatherScatter) { 3004 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3005 Value *VectorGep = State.get(Addr, Part); 3006 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3007 nullptr, "wide.masked.gather"); 3008 addMetadata(NewLI, LI); 3009 } else { 3010 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3011 if (isMaskRequired) 3012 NewLI = Builder.CreateMaskedLoad( 3013 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3014 PoisonValue::get(DataTy), "wide.masked.load"); 3015 else 3016 NewLI = 3017 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3018 3019 // Add metadata to the load, but setVectorValue to the reverse shuffle. 3020 addMetadata(NewLI, LI); 3021 if (Reverse) 3022 NewLI = reverseVector(NewLI); 3023 } 3024 3025 State.set(Def, NewLI, Part); 3026 } 3027 } 3028 3029 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def, 3030 VPUser &User, 3031 const VPIteration &Instance, 3032 bool IfPredicateInstr, 3033 VPTransformState &State) { 3034 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 3035 3036 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 3037 // the first lane and part. 3038 if (isa<NoAliasScopeDeclInst>(Instr)) 3039 if (!Instance.isFirstIteration()) 3040 return; 3041 3042 setDebugLocFromInst(Instr); 3043 3044 // Does this instruction return a value ? 
3045 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 3046 3047 Instruction *Cloned = Instr->clone(); 3048 if (!IsVoidRetTy) 3049 Cloned->setName(Instr->getName() + ".cloned"); 3050 3051 State.Builder.SetInsertPoint(Builder.GetInsertBlock(), 3052 Builder.GetInsertPoint()); 3053 // Replace the operands of the cloned instructions with their scalar 3054 // equivalents in the new loop. 3055 for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) { 3056 auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op)); 3057 auto InputInstance = Instance; 3058 if (!Operand || !OrigLoop->contains(Operand) || 3059 (Cost->isUniformAfterVectorization(Operand, State.VF))) 3060 InputInstance.Lane = VPLane::getFirstLane(); 3061 auto *NewOp = State.get(User.getOperand(op), InputInstance); 3062 Cloned->setOperand(op, NewOp); 3063 } 3064 addNewMetadata(Cloned, Instr); 3065 3066 // Place the cloned scalar in the new loop. 3067 Builder.Insert(Cloned); 3068 3069 State.set(Def, Cloned, Instance); 3070 3071 // If we just cloned a new assumption, add it the assumption cache. 3072 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 3073 AC->registerAssumption(II); 3074 3075 // End if-block. 3076 if (IfPredicateInstr) 3077 PredicatedInstructions.push_back(Cloned); 3078 } 3079 3080 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start, 3081 Value *End, Value *Step, 3082 Instruction *DL) { 3083 BasicBlock *Header = L->getHeader(); 3084 BasicBlock *Latch = L->getLoopLatch(); 3085 // As we're just creating this loop, it's possible no latch exists 3086 // yet. If so, use the header as this will be a single block loop. 3087 if (!Latch) 3088 Latch = Header; 3089 3090 IRBuilder<> B(&*Header->getFirstInsertionPt()); 3091 Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction); 3092 setDebugLocFromInst(OldInst, &B); 3093 auto *Induction = B.CreatePHI(Start->getType(), 2, "index"); 3094 3095 B.SetInsertPoint(Latch->getTerminator()); 3096 setDebugLocFromInst(OldInst, &B); 3097 3098 // Create i+1 and fill the PHINode. 3099 // 3100 // If the tail is not folded, we know that End - Start >= Step (either 3101 // statically or through the minimum iteration checks). We also know that both 3102 // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV + 3103 // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned 3104 // overflows and we can mark the induction increment as NUW. 3105 Value *Next = B.CreateAdd(Induction, Step, "index.next", 3106 /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false); 3107 Induction->addIncoming(Start, L->getLoopPreheader()); 3108 Induction->addIncoming(Next, Latch); 3109 // Create the compare. 3110 Value *ICmp = B.CreateICmpEQ(Next, End); 3111 B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header); 3112 3113 // Now we have two terminators. Remove the old one from the block. 3114 Latch->getTerminator()->eraseFromParent(); 3115 3116 return Induction; 3117 } 3118 3119 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) { 3120 if (TripCount) 3121 return TripCount; 3122 3123 assert(L && "Create Trip Count for null loop."); 3124 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3125 // Find the loop boundaries. 
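  // Illustrative sketch: for a loop `for (i = 0; i < n; ++i)` the
  // backedge-taken count is n - 1 and the trip count computed below is n; the
  // count may first need to be truncated or extended to the widest induction
  // type.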
3126 ScalarEvolution *SE = PSE.getSE(); 3127 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 3128 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 3129 "Invalid loop count"); 3130 3131 Type *IdxTy = Legal->getWidestInductionType(); 3132 assert(IdxTy && "No type for induction"); 3133 3134 // The exit count might have the type of i64 while the phi is i32. This can 3135 // happen if we have an induction variable that is sign extended before the 3136 // compare. The only way that we get a backedge taken count is that the 3137 // induction variable was signed and as such will not overflow. In such a case 3138 // truncation is legal. 3139 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 3140 IdxTy->getPrimitiveSizeInBits()) 3141 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 3142 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 3143 3144 // Get the total trip count from the count by adding 1. 3145 const SCEV *ExitCount = SE->getAddExpr( 3146 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 3147 3148 const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); 3149 3150 // Expand the trip count and place the new instructions in the preheader. 3151 // Notice that the pre-header does not change, only the loop body. 3152 SCEVExpander Exp(*SE, DL, "induction"); 3153 3154 // Count holds the overall loop count (N). 3155 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 3156 L->getLoopPreheader()->getTerminator()); 3157 3158 if (TripCount->getType()->isPointerTy()) 3159 TripCount = 3160 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 3161 L->getLoopPreheader()->getTerminator()); 3162 3163 return TripCount; 3164 } 3165 3166 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) { 3167 if (VectorTripCount) 3168 return VectorTripCount; 3169 3170 Value *TC = getOrCreateTripCount(L); 3171 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator()); 3172 3173 Type *Ty = TC->getType(); 3174 // This is where we can make the step a runtime constant. 3175 Value *Step = createStepForVF(Builder, ConstantInt::get(Ty, UF), VF); 3176 3177 // If the tail is to be folded by masking, round the number of iterations N 3178 // up to a multiple of Step instead of rounding down. This is done by first 3179 // adding Step-1 and then rounding down. Note that it's ok if this addition 3180 // overflows: the vector induction variable will eventually wrap to zero given 3181 // that it starts at zero and its Step is a power of two; the loop will then 3182 // exit, with the last early-exit vector comparison also producing all-true. 3183 if (Cost->foldTailByMasking()) { 3184 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3185 "VF*UF must be a power of 2 when folding tail by masking"); 3186 assert(!VF.isScalable() && 3187 "Tail folding not yet supported for scalable vectors"); 3188 TC = Builder.CreateAdd( 3189 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3190 } 3191 3192 // Now we need to generate the expression for the part of the loop that the 3193 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3194 // iterations are not required for correctness, or N - Step, otherwise. Step 3195 // is equal to the vectorization factor (number of SIMD elements) times the 3196 // unroll factor (number of SIMD instructions). 
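  // Worked example (illustrative): with N = 10, VF = 4 and UF = 1, Step is 4
  // and N % Step is 2, so the vector loop covers 8 iterations and 2 remain for
  // the scalar loop. With tail folding, N is effectively rounded up to the
  // next multiple of Step (12 here) and the vector loop covers all iterations
  // under a mask.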
3197 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3198 3199 // There are cases where we *must* run at least one iteration in the remainder 3200 // loop. See the cost model for when this can happen. If the step evenly 3201 // divides the trip count, we set the remainder to be equal to the step. If 3202 // the step does not evenly divide the trip count, no adjustment is necessary 3203 // since there will already be scalar iterations. Note that the minimum 3204 // iterations check ensures that N >= Step. 3205 if (Cost->requiresScalarEpilogue(VF)) { 3206 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3207 R = Builder.CreateSelect(IsZero, Step, R); 3208 } 3209 3210 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3211 3212 return VectorTripCount; 3213 } 3214 3215 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3216 const DataLayout &DL) { 3217 // Verify that V is a vector type with same number of elements as DstVTy. 3218 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3219 unsigned VF = DstFVTy->getNumElements(); 3220 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3221 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3222 Type *SrcElemTy = SrcVecTy->getElementType(); 3223 Type *DstElemTy = DstFVTy->getElementType(); 3224 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3225 "Vector elements must have same size"); 3226 3227 // Do a direct cast if element types are castable. 3228 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3229 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3230 } 3231 // V cannot be directly casted to desired vector type. 3232 // May happen when V is a floating point vector but DstVTy is a vector of 3233 // pointers or vice-versa. Handle this using a two-step bitcast using an 3234 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3235 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3236 "Only one type should be a pointer type"); 3237 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3238 "Only one type should be a floating point type"); 3239 Type *IntTy = 3240 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3241 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3242 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3243 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3244 } 3245 3246 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3247 BasicBlock *Bypass) { 3248 Value *Count = getOrCreateTripCount(L); 3249 // Reuse existing vector loop preheader for TC checks. 3250 // Note that new preheader block is generated for vector loop. 3251 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3252 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3253 3254 // Generate code to check if the loop's trip count is less than VF * UF, or 3255 // equal to it in case a scalar epilogue is required; this implies that the 3256 // vector trip count is zero. This check also covers the case where adding one 3257 // to the backedge-taken count overflowed leading to an incorrect trip count 3258 // of zero. In this case we will also jump to the scalar loop. 3259 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3260 : ICmpInst::ICMP_ULT; 3261 3262 // If tail is to be folded, vector loop takes care of all iterations. 
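  // Illustrative sketch: without tail folding, VF = 4 and UF = 2 emit a
  // comparison such as `icmp ult i64 %count, 8` (`ule` when a scalar epilogue
  // is required), and the branch created below jumps to the bypass (the scalar
  // loop) when the check is true.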
3263 Value *CheckMinIters = Builder.getFalse(); 3264 if (!Cost->foldTailByMasking()) { 3265 Value *Step = 3266 createStepForVF(Builder, ConstantInt::get(Count->getType(), UF), VF); 3267 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3268 } 3269 // Create new preheader for vector loop. 3270 LoopVectorPreHeader = 3271 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3272 "vector.ph"); 3273 3274 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3275 DT->getNode(Bypass)->getIDom()) && 3276 "TC check is expected to dominate Bypass"); 3277 3278 // Update dominator for Bypass & LoopExit (if needed). 3279 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3280 if (!Cost->requiresScalarEpilogue(VF)) 3281 // If there is an epilogue which must run, there's no edge from the 3282 // middle block to exit blocks and thus no need to update the immediate 3283 // dominator of the exit blocks. 3284 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3285 3286 ReplaceInstWithInst( 3287 TCCheckBlock->getTerminator(), 3288 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3289 LoopBypassBlocks.push_back(TCCheckBlock); 3290 } 3291 3292 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3293 3294 BasicBlock *const SCEVCheckBlock = 3295 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3296 if (!SCEVCheckBlock) 3297 return nullptr; 3298 3299 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3300 (OptForSizeBasedOnProfile && 3301 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3302 "Cannot SCEV check stride or overflow when optimizing for size"); 3303 3304 3305 // Update dominator only if this is first RT check. 3306 if (LoopBypassBlocks.empty()) { 3307 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3308 if (!Cost->requiresScalarEpilogue(VF)) 3309 // If there is an epilogue which must run, there's no edge from the 3310 // middle block to exit blocks and thus no need to update the immediate 3311 // dominator of the exit blocks. 3312 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3313 } 3314 3315 LoopBypassBlocks.push_back(SCEVCheckBlock); 3316 AddedSafetyChecks = true; 3317 return SCEVCheckBlock; 3318 } 3319 3320 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3321 BasicBlock *Bypass) { 3322 // VPlan-native path does not do any analysis for runtime checks currently. 3323 if (EnableVPlanNativePath) 3324 return nullptr; 3325 3326 BasicBlock *const MemCheckBlock = 3327 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3328 3329 // Check if we generated code that checks in runtime if arrays overlap. We put 3330 // the checks into a separate block to make the more common case of few 3331 // elements faster. 
3332 if (!MemCheckBlock) 3333 return nullptr; 3334 3335 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3336 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3337 "Cannot emit memory checks when optimizing for size, unless forced " 3338 "to vectorize."); 3339 ORE->emit([&]() { 3340 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3341 L->getStartLoc(), L->getHeader()) 3342 << "Code-size may be reduced by not forcing " 3343 "vectorization, or by source-code modifications " 3344 "eliminating the need for runtime checks " 3345 "(e.g., adding 'restrict')."; 3346 }); 3347 } 3348 3349 LoopBypassBlocks.push_back(MemCheckBlock); 3350 3351 AddedSafetyChecks = true; 3352 3353 // We currently don't use LoopVersioning for the actual loop cloning but we 3354 // still use it to add the noalias metadata. 3355 LVer = std::make_unique<LoopVersioning>( 3356 *Legal->getLAI(), 3357 Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, 3358 DT, PSE.getSE()); 3359 LVer->prepareNoAliasMetadata(); 3360 return MemCheckBlock; 3361 } 3362 3363 Value *InnerLoopVectorizer::emitTransformedIndex( 3364 IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL, 3365 const InductionDescriptor &ID) const { 3366 3367 SCEVExpander Exp(*SE, DL, "induction"); 3368 auto Step = ID.getStep(); 3369 auto StartValue = ID.getStartValue(); 3370 assert(Index->getType()->getScalarType() == Step->getType() && 3371 "Index scalar type does not match StepValue type"); 3372 3373 // Note: the IR at this point is broken. We cannot use SE to create any new 3374 // SCEV and then expand it, hoping that SCEV's simplification will give us 3375 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 3376 // lead to various SCEV crashes. So all we can do is to use builder and rely 3377 // on InstCombine for future simplifications. Here we handle some trivial 3378 // cases only. 3379 auto CreateAdd = [&B](Value *X, Value *Y) { 3380 assert(X->getType() == Y->getType() && "Types don't match!"); 3381 if (auto *CX = dyn_cast<ConstantInt>(X)) 3382 if (CX->isZero()) 3383 return Y; 3384 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3385 if (CY->isZero()) 3386 return X; 3387 return B.CreateAdd(X, Y); 3388 }; 3389 3390 // We allow X to be a vector type, in which case Y will potentially be 3391 // splatted into a vector with the same element count. 3392 auto CreateMul = [&B](Value *X, Value *Y) { 3393 assert(X->getType()->getScalarType() == Y->getType() && 3394 "Types don't match!"); 3395 if (auto *CX = dyn_cast<ConstantInt>(X)) 3396 if (CX->isOne()) 3397 return Y; 3398 if (auto *CY = dyn_cast<ConstantInt>(Y)) 3399 if (CY->isOne()) 3400 return X; 3401 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 3402 if (XVTy && !isa<VectorType>(Y->getType())) 3403 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 3404 return B.CreateMul(X, Y); 3405 }; 3406 3407 // Get a suitable insert point for SCEV expansion. For blocks in the vector 3408 // loop, choose the end of the vector loop header (=LoopVectorBody), because 3409 // the DomTree is not kept up-to-date for additional blocks generated in the 3410 // vector loop. By using the header as insertion point, we guarantee that the 3411 // expanded instructions dominate all their uses. 
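  // Sketch of the cases handled in the switch below (illustrative): an integer
  // induction maps Index to StartValue + Index * Step, a pointer induction
  // emits a GEP of Index * Step elements from StartValue, and an FP induction
  // emits StartValue fadd/fsub Index * Step.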
3412 auto GetInsertPoint = [this, &B]() { 3413 BasicBlock *InsertBB = B.GetInsertPoint()->getParent(); 3414 if (InsertBB != LoopVectorBody && 3415 LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB)) 3416 return LoopVectorBody->getTerminator(); 3417 return &*B.GetInsertPoint(); 3418 }; 3419 3420 switch (ID.getKind()) { 3421 case InductionDescriptor::IK_IntInduction: { 3422 assert(!isa<VectorType>(Index->getType()) && 3423 "Vector indices not supported for integer inductions yet"); 3424 assert(Index->getType() == StartValue->getType() && 3425 "Index type does not match StartValue type"); 3426 if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne()) 3427 return B.CreateSub(StartValue, Index); 3428 auto *Offset = CreateMul( 3429 Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint())); 3430 return CreateAdd(StartValue, Offset); 3431 } 3432 case InductionDescriptor::IK_PtrInduction: { 3433 assert(isa<SCEVConstant>(Step) && 3434 "Expected constant step for pointer induction"); 3435 return B.CreateGEP( 3436 StartValue->getType()->getPointerElementType(), StartValue, 3437 CreateMul(Index, 3438 Exp.expandCodeFor(Step, Index->getType()->getScalarType(), 3439 GetInsertPoint()))); 3440 } 3441 case InductionDescriptor::IK_FpInduction: { 3442 assert(!isa<VectorType>(Index->getType()) && 3443 "Vector indices not supported for FP inductions yet"); 3444 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 3445 auto InductionBinOp = ID.getInductionBinOp(); 3446 assert(InductionBinOp && 3447 (InductionBinOp->getOpcode() == Instruction::FAdd || 3448 InductionBinOp->getOpcode() == Instruction::FSub) && 3449 "Original bin op should be defined for FP induction"); 3450 3451 Value *StepValue = cast<SCEVUnknown>(Step)->getValue(); 3452 Value *MulExp = B.CreateFMul(StepValue, Index); 3453 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 3454 "induction"); 3455 } 3456 case InductionDescriptor::IK_NoInduction: 3457 return nullptr; 3458 } 3459 llvm_unreachable("invalid enum"); 3460 } 3461 3462 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3463 LoopScalarBody = OrigLoop->getHeader(); 3464 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3465 assert(LoopVectorPreHeader && "Invalid loop structure"); 3466 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3467 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3468 "multiple exit loop without required epilogue?"); 3469 3470 LoopMiddleBlock = 3471 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3472 LI, nullptr, Twine(Prefix) + "middle.block"); 3473 LoopScalarPreHeader = 3474 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3475 nullptr, Twine(Prefix) + "scalar.ph"); 3476 3477 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3478 3479 // Set up the middle block terminator. Two cases: 3480 // 1) If we know that we must execute the scalar epilogue, emit an 3481 // unconditional branch. 3482 // 2) Otherwise, we must have a single unique exit block (due to how we 3483 // implement the multiple exit case). In this case, set up a conditional 3484 // branch from the middle block to the loop scalar preheader, and the 3485 // exit block. completeLoopSkeleton will update the condition to use an 3486 // iteration check, if required to decide whether to execute the remainder. 3487 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3488 BranchInst::Create(LoopScalarPreHeader) : 3489 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3490 Builder.getTrue()); 3491 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3492 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3493 3494 // We intentionally don't let SplitBlock update LoopInfo since 3495 // LoopVectorBody should belong to a different loop than LoopVectorPreHeader. 3496 // LoopVectorBody is explicitly added to the correct place a few lines later. 3497 LoopVectorBody = 3498 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3499 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3500 3501 // Update dominator for loop exit. 3502 if (!Cost->requiresScalarEpilogue(VF)) 3503 // If there is an epilogue which must run, there's no edge from the 3504 // middle block to exit blocks and thus no need to update the immediate 3505 // dominator of the exit blocks. 3506 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3507 3508 // Create and register the new vector loop. 3509 Loop *Lp = LI->AllocateLoop(); 3510 Loop *ParentLoop = OrigLoop->getParentLoop(); 3511 3512 // Insert the new loop into the loop nest and register the new basic blocks 3513 // before calling any utilities such as SCEV that require valid LoopInfo. 3514 if (ParentLoop) { 3515 ParentLoop->addChildLoop(Lp); 3516 } else { 3517 LI->addTopLevelLoop(Lp); 3518 } 3519 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3520 return Lp; 3521 } 3522 3523 void InnerLoopVectorizer::createInductionResumeValues( 3524 Loop *L, Value *VectorTripCount, 3525 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3526 assert(VectorTripCount && L && "Expected valid arguments"); 3527 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3528 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3529 "Inconsistent information about additional bypass."); 3530 // We are going to resume the execution of the scalar loop. 3531 // Go over all of the induction variables that we found and fix the 3532 // PHIs that are left in the scalar version of the loop. 3533 // The starting values of PHI nodes depend on the counter of the last 3534 // iteration in the vectorized loop. 3535 // If we come from a bypass edge then we need to start from the original 3536 // start value. 3537 for (auto &InductionEntry : Legal->getInductionVars()) { 3538 PHINode *OrigPhi = InductionEntry.first; 3539 InductionDescriptor II = InductionEntry.second; 3540 3541 // Create phi nodes to merge from the backedge-taken check block. 3542 PHINode *BCResumeVal = 3543 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3544 LoopScalarPreHeader->getTerminator()); 3545 // Copy original phi DL over to the new one. 3546 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3547 Value *&EndValue = IVEndValues[OrigPhi]; 3548 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3549 if (OrigPhi == OldInduction) { 3550 // We know what the end value is. 3551 EndValue = VectorTripCount; 3552 } else { 3553 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3554 3555 // Fast-math-flags propagate from the original induction instruction.
3556 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3557 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3558 3559 Type *StepType = II.getStep()->getType(); 3560 Instruction::CastOps CastOp = 3561 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3562 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3563 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3564 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3565 EndValue->setName("ind.end"); 3566 3567 // Compute the end value for the additional bypass (if applicable). 3568 if (AdditionalBypass.first) { 3569 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3570 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3571 StepType, true); 3572 CRD = 3573 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3574 EndValueFromAdditionalBypass = 3575 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3576 EndValueFromAdditionalBypass->setName("ind.end"); 3577 } 3578 } 3579 // The new PHI merges the original incoming value, in case of a bypass, 3580 // or the value at the end of the vectorized loop. 3581 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3582 3583 // Fix the scalar body counter (PHI node). 3584 // The old induction's phi node in the scalar body needs the truncated 3585 // value. 3586 for (BasicBlock *BB : LoopBypassBlocks) 3587 BCResumeVal->addIncoming(II.getStartValue(), BB); 3588 3589 if (AdditionalBypass.first) 3590 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3591 EndValueFromAdditionalBypass); 3592 3593 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3594 } 3595 } 3596 3597 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3598 MDNode *OrigLoopID) { 3599 assert(L && "Expected valid loop."); 3600 3601 // The trip counts should be cached by now. 3602 Value *Count = getOrCreateTripCount(L); 3603 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3604 3605 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3606 3607 // Add a check in the middle block to see if we have completed 3608 // all of the iterations in the first vector loop. Three cases: 3609 // 1) If we require a scalar epilogue, there is no conditional branch as 3610 // we unconditionally branch to the scalar preheader. Do nothing. 3611 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3612 // Thus if tail is to be folded, we know we don't need to run the 3613 // remainder and we can use the previous value for the condition (true). 3614 // 3) Otherwise, construct a runtime check. 3615 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3616 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3617 Count, VectorTripCount, "cmp.n", 3618 LoopMiddleBlock->getTerminator()); 3619 3620 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3621 // of the corresponding compare because they may have ended up with 3622 // different line numbers and we want to avoid awkward line stepping while 3623 // debugging. Eg. if the compare has got a line number inside the loop. 3624 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3625 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3626 } 3627 3628 // Get ready to start creating new instructions into the vectorized body. 
3629 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3630 "Inconsistent vector loop preheader"); 3631 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3632 3633 Optional<MDNode *> VectorizedLoopID = 3634 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3635 LLVMLoopVectorizeFollowupVectorized}); 3636 if (VectorizedLoopID.hasValue()) { 3637 L->setLoopID(VectorizedLoopID.getValue()); 3638 3639 // Do not setAlreadyVectorized if loop attributes have been defined 3640 // explicitly. 3641 return LoopVectorPreHeader; 3642 } 3643 3644 // Keep all loop hints from the original loop on the vector loop (we'll 3645 // replace the vectorizer-specific hints below). 3646 if (MDNode *LID = OrigLoop->getLoopID()) 3647 L->setLoopID(LID); 3648 3649 LoopVectorizeHints Hints(L, true, *ORE); 3650 Hints.setAlreadyVectorized(); 3651 3652 #ifdef EXPENSIVE_CHECKS 3653 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3654 LI->verify(*DT); 3655 #endif 3656 3657 return LoopVectorPreHeader; 3658 } 3659 3660 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3661 /* 3662 In this function we generate a new loop. The new loop will contain 3663 the vectorized instructions while the old loop will continue to run the 3664 scalar remainder. 3665 3666 [ ] <-- loop iteration number check. 3667 / | 3668 / v 3669 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3670 | / | 3671 | / v 3672 || [ ] <-- vector pre header. 3673 |/ | 3674 | v 3675 | [ ] \ 3676 | [ ]_| <-- vector loop. 3677 | | 3678 | v 3679 \ -[ ] <--- middle-block. 3680 \/ | 3681 /\ v 3682 | ->[ ] <--- new preheader. 3683 | | 3684 (opt) v <-- edge from middle to exit iff epilogue is not required. 3685 | [ ] \ 3686 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3687 \ | 3688 \ v 3689 >[ ] <-- exit block(s). 3690 ... 3691 */ 3692 3693 // Get the metadata of the original loop before it gets modified. 3694 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3695 3696 // Workaround! Compute the trip count of the original loop and cache it 3697 // before we start modifying the CFG. This code has a systemic problem 3698 // wherein it tries to run analysis over partially constructed IR; this is 3699 // wrong, and not simply for SCEV. The trip count of the original loop 3700 // simply happens to be prone to hitting this in practice. In theory, we 3701 // can hit the same issue for any SCEV, or ValueTracking query done during 3702 // mutation. See PR49900. 3703 getOrCreateTripCount(OrigLoop); 3704 3705 // Create an empty vector loop, and prepare basic blocks for the runtime 3706 // checks. 3707 Loop *Lp = createVectorLoopSkeleton(""); 3708 3709 // Now, compare the new count to zero. If it is zero skip the vector loop and 3710 // jump to the scalar loop. This check also covers the case where the 3711 // backedge-taken count is uint##_max: adding one to it will overflow leading 3712 // to an incorrect trip count of zero. In this (rare) case we will also jump 3713 // to the scalar loop. 3714 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3715 3716 // Generate the code to check any assumptions that we've made for SCEV 3717 // expressions. 3718 emitSCEVChecks(Lp, LoopScalarPreHeader); 3719 3720 // Generate the code that checks in runtime if arrays overlap. We put the 3721 // checks into a separate block to make the more common case of few elements 3722 // faster. 
3723 emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 3724 3725 // Some loops have a single integer induction variable, while other loops 3726 // don't. One example is c++ iterators that often have multiple pointer 3727 // induction variables. In the code below we also support a case where we 3728 // don't have a single induction variable. 3729 // 3730 // We try to obtain an induction variable from the original loop as hard 3731 // as possible. However if we don't find one that: 3732 // - is an integer 3733 // - counts from zero, stepping by one 3734 // - is the size of the widest induction variable type 3735 // then we create a new one. 3736 OldInduction = Legal->getPrimaryInduction(); 3737 Type *IdxTy = Legal->getWidestInductionType(); 3738 Value *StartIdx = ConstantInt::get(IdxTy, 0); 3739 // The loop step is equal to the vectorization factor (num of SIMD elements) 3740 // times the unroll factor (num of SIMD instructions). 3741 Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt()); 3742 Value *Step = createStepForVF(Builder, ConstantInt::get(IdxTy, UF), VF); 3743 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 3744 Induction = 3745 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 3746 getDebugLocFromInstOrOperands(OldInduction)); 3747 3748 // Emit phis for the new starting index of the scalar loop. 3749 createInductionResumeValues(Lp, CountRoundDown); 3750 3751 return completeLoopSkeleton(Lp, OrigLoopID); 3752 } 3753 3754 // Fix up external users of the induction variable. At this point, we are 3755 // in LCSSA form, with all external PHIs that use the IV having one input value, 3756 // coming from the remainder loop. We need those PHIs to also have a correct 3757 // value for the IV when arriving directly from the middle block. 3758 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3759 const InductionDescriptor &II, 3760 Value *CountRoundDown, Value *EndValue, 3761 BasicBlock *MiddleBlock) { 3762 // There are two kinds of external IV usages - those that use the value 3763 // computed in the last iteration (the PHI) and those that use the penultimate 3764 // value (the value that feeds into the phi from the loop latch). 3765 // We allow both, but they, obviously, have different values. 3766 3767 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3768 3769 DenseMap<Value *, Value *> MissingVals; 3770 3771 // An external user of the last iteration's value should see the value that 3772 // the remainder loop uses to initialize its own IV. 3773 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3774 for (User *U : PostInc->users()) { 3775 Instruction *UI = cast<Instruction>(U); 3776 if (!OrigLoop->contains(UI)) { 3777 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3778 MissingVals[UI] = EndValue; 3779 } 3780 } 3781 3782 // An external user of the penultimate value need to see EndValue - Step. 3783 // The simplest way to get this is to recompute it from the constituent SCEVs, 3784 // that is Start + (Step * (CRD - 1)). 3785 for (User *U : OrigPhi->users()) { 3786 auto *UI = cast<Instruction>(U); 3787 if (!OrigLoop->contains(UI)) { 3788 const DataLayout &DL = 3789 OrigLoop->getHeader()->getModule()->getDataLayout(); 3790 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3791 3792 IRBuilder<> B(MiddleBlock->getTerminator()); 3793 3794 // Fast-math-flags propagate from the original induction instruction. 
3795 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3796 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3797 3798 Value *CountMinusOne = B.CreateSub( 3799 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3800 Value *CMO = 3801 !II.getStep()->getType()->isIntegerTy() 3802 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3803 II.getStep()->getType()) 3804 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3805 CMO->setName("cast.cmo"); 3806 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3807 Escape->setName("ind.escape"); 3808 MissingVals[UI] = Escape; 3809 } 3810 } 3811 3812 for (auto &I : MissingVals) { 3813 PHINode *PHI = cast<PHINode>(I.first); 3814 // One corner case we have to handle is two IVs "chasing" each-other, 3815 // that is %IV2 = phi [...], [ %IV1, %latch ] 3816 // In this case, if IV1 has an external use, we need to avoid adding both 3817 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3818 // don't already have an incoming value for the middle block. 3819 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3820 PHI->addIncoming(I.second, MiddleBlock); 3821 } 3822 } 3823 3824 namespace { 3825 3826 struct CSEDenseMapInfo { 3827 static bool canHandle(const Instruction *I) { 3828 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3829 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3830 } 3831 3832 static inline Instruction *getEmptyKey() { 3833 return DenseMapInfo<Instruction *>::getEmptyKey(); 3834 } 3835 3836 static inline Instruction *getTombstoneKey() { 3837 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3838 } 3839 3840 static unsigned getHashValue(const Instruction *I) { 3841 assert(canHandle(I) && "Unknown instruction!"); 3842 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3843 I->value_op_end())); 3844 } 3845 3846 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3847 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3848 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3849 return LHS == RHS; 3850 return LHS->isIdenticalTo(RHS); 3851 } 3852 }; 3853 3854 } // end anonymous namespace 3855 3856 ///Perform cse of induction variable instructions. 3857 static void cse(BasicBlock *BB) { 3858 // Perform simple cse. 3859 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3860 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 3861 Instruction *In = &*I++; 3862 3863 if (!CSEDenseMapInfo::canHandle(In)) 3864 continue; 3865 3866 // Check if we can replace this instruction with any of the 3867 // visited instructions. 3868 if (Instruction *V = CSEMap.lookup(In)) { 3869 In->replaceAllUsesWith(V); 3870 In->eraseFromParent(); 3871 continue; 3872 } 3873 3874 CSEMap[In] = In; 3875 } 3876 } 3877 3878 InstructionCost 3879 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3880 bool &NeedToScalarize) const { 3881 Function *F = CI->getCalledFunction(); 3882 Type *ScalarRetTy = CI->getType(); 3883 SmallVector<Type *, 4> Tys, ScalarTys; 3884 for (auto &ArgOp : CI->arg_operands()) 3885 ScalarTys.push_back(ArgOp->getType()); 3886 3887 // Estimate cost of scalarized vector call. The source operands are assumed 3888 // to be vectors, so we need to extract individual elements from there, 3889 // execute VF scalar calls, and then gather the result into the vector return 3890 // value. 
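  // Illustrative sketch of the estimate built below: the scalarized cost is
  // roughly ScalarCallCost * VF + ScalarizationCost (e.g. 10 * 4 + 6 = 46 for
  // VF = 4); a vectorized library call is preferred only if its own cost is
  // lower.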
3891 InstructionCost ScalarCallCost = 3892 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3893 if (VF.isScalar()) 3894 return ScalarCallCost; 3895 3896 // Compute corresponding vector type for return value and arguments. 3897 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3898 for (Type *ScalarTy : ScalarTys) 3899 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3900 3901 // Compute costs of unpacking argument values for the scalar calls and 3902 // packing the return values to a vector. 3903 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3904 3905 InstructionCost Cost = 3906 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3907 3908 // If we can't emit a vector call for this function, then the currently found 3909 // cost is the cost we need to return. 3910 NeedToScalarize = true; 3911 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3912 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3913 3914 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3915 return Cost; 3916 3917 // If the corresponding vector cost is cheaper, return its cost. 3918 InstructionCost VectorCallCost = 3919 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3920 if (VectorCallCost < Cost) { 3921 NeedToScalarize = false; 3922 Cost = VectorCallCost; 3923 } 3924 return Cost; 3925 } 3926 3927 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3928 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3929 return Elt; 3930 return VectorType::get(Elt, VF); 3931 } 3932 3933 InstructionCost 3934 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3935 ElementCount VF) const { 3936 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3937 assert(ID && "Expected intrinsic call!"); 3938 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3939 FastMathFlags FMF; 3940 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3941 FMF = FPMO->getFastMathFlags(); 3942 3943 SmallVector<const Value *> Arguments(CI->arg_begin(), CI->arg_end()); 3944 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3945 SmallVector<Type *> ParamTys; 3946 std::transform(FTy->param_begin(), FTy->param_end(), 3947 std::back_inserter(ParamTys), 3948 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3949 3950 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3951 dyn_cast<IntrinsicInst>(CI)); 3952 return TTI.getIntrinsicInstrCost(CostAttrs, 3953 TargetTransformInfo::TCK_RecipThroughput); 3954 } 3955 3956 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3957 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3958 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3959 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3960 } 3961 3962 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3963 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3964 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3965 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3966 } 3967 3968 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3969 // For every instruction `I` in MinBWs, truncate the operands, create a 3970 // truncated version of `I` and reextend its result. InstCombine runs 3971 // later and will remove any ext/trunc pairs. 
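  // Illustrative sketch (assuming VF = 4 and a value known to need only 8
  // bits): an `add <4 x i32>` is rewritten roughly as
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %add8 = add <4 x i8> %a.tr, %b.tr
  //   %res  = zext <4 x i8> %add8 to <4 x i32>
  // so existing users keep seeing the original i32 type; the value names here
  // are made up for illustration.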
3972 SmallPtrSet<Value *, 4> Erased; 3973 for (const auto &KV : Cost->getMinimalBitwidths()) { 3974 // If the value wasn't vectorized, we must maintain the original scalar 3975 // type. The absence of the value from State indicates that it 3976 // wasn't vectorized. 3977 VPValue *Def = State.Plan->getVPValue(KV.first); 3978 if (!State.hasAnyVectorValue(Def)) 3979 continue; 3980 for (unsigned Part = 0; Part < UF; ++Part) { 3981 Value *I = State.get(Def, Part); 3982 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3983 continue; 3984 Type *OriginalTy = I->getType(); 3985 Type *ScalarTruncatedTy = 3986 IntegerType::get(OriginalTy->getContext(), KV.second); 3987 auto *TruncatedTy = VectorType::get( 3988 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3989 if (TruncatedTy == OriginalTy) 3990 continue; 3991 3992 IRBuilder<> B(cast<Instruction>(I)); 3993 auto ShrinkOperand = [&](Value *V) -> Value * { 3994 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3995 if (ZI->getSrcTy() == TruncatedTy) 3996 return ZI->getOperand(0); 3997 return B.CreateZExtOrTrunc(V, TruncatedTy); 3998 }; 3999 4000 // The actual instruction modification depends on the instruction type, 4001 // unfortunately. 4002 Value *NewI = nullptr; 4003 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4004 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4005 ShrinkOperand(BO->getOperand(1))); 4006 4007 // Any wrapping introduced by shrinking this operation shouldn't be 4008 // considered undefined behavior. So, we can't unconditionally copy 4009 // arithmetic wrapping flags to NewI. 4010 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4011 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4012 NewI = 4013 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4014 ShrinkOperand(CI->getOperand(1))); 4015 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4016 NewI = B.CreateSelect(SI->getCondition(), 4017 ShrinkOperand(SI->getTrueValue()), 4018 ShrinkOperand(SI->getFalseValue())); 4019 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4020 switch (CI->getOpcode()) { 4021 default: 4022 llvm_unreachable("Unhandled cast!"); 4023 case Instruction::Trunc: 4024 NewI = ShrinkOperand(CI->getOperand(0)); 4025 break; 4026 case Instruction::SExt: 4027 NewI = B.CreateSExtOrTrunc( 4028 CI->getOperand(0), 4029 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4030 break; 4031 case Instruction::ZExt: 4032 NewI = B.CreateZExtOrTrunc( 4033 CI->getOperand(0), 4034 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4035 break; 4036 } 4037 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4038 auto Elements0 = 4039 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4040 auto *O0 = B.CreateZExtOrTrunc( 4041 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4042 auto Elements1 = 4043 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4044 auto *O1 = B.CreateZExtOrTrunc( 4045 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4046 4047 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4048 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4049 // Don't do anything with the operands, just extend the result. 
4050 continue; 4051 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4052 auto Elements = 4053 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4054 auto *O0 = B.CreateZExtOrTrunc( 4055 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4056 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4057 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4058 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4059 auto Elements = 4060 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4061 auto *O0 = B.CreateZExtOrTrunc( 4062 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4063 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4064 } else { 4065 // If we don't know what to do, be conservative and don't do anything. 4066 continue; 4067 } 4068 4069 // Lastly, extend the result. 4070 NewI->takeName(cast<Instruction>(I)); 4071 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4072 I->replaceAllUsesWith(Res); 4073 cast<Instruction>(I)->eraseFromParent(); 4074 Erased.insert(I); 4075 State.reset(Def, Res, Part); 4076 } 4077 } 4078 4079 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4080 for (const auto &KV : Cost->getMinimalBitwidths()) { 4081 // If the value wasn't vectorized, we must maintain the original scalar 4082 // type. The absence of the value from State indicates that it 4083 // wasn't vectorized. 4084 VPValue *Def = State.Plan->getVPValue(KV.first); 4085 if (!State.hasAnyVectorValue(Def)) 4086 continue; 4087 for (unsigned Part = 0; Part < UF; ++Part) { 4088 Value *I = State.get(Def, Part); 4089 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4090 if (Inst && Inst->use_empty()) { 4091 Value *NewI = Inst->getOperand(0); 4092 Inst->eraseFromParent(); 4093 State.reset(Def, NewI, Part); 4094 } 4095 } 4096 } 4097 } 4098 4099 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4100 // Insert truncates and extends for any truncated instructions as hints to 4101 // InstCombine. 4102 if (VF.isVector()) 4103 truncateToMinimalBitwidths(State); 4104 4105 // Fix widened non-induction PHIs by setting up the PHI operands. 4106 if (OrigPHIsToFix.size()) { 4107 assert(EnableVPlanNativePath && 4108 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4109 fixNonInductionPHIs(State); 4110 } 4111 4112 // At this point every instruction in the original loop is widened to a 4113 // vector form. Now we need to fix the recurrences in the loop. These PHI 4114 // nodes are currently empty because we did not want to introduce cycles. 4115 // This is the second stage of vectorizing recurrences. 4116 fixCrossIterationPHIs(State); 4117 4118 // Forget the original basic block. 4119 PSE.getSE()->forgetLoop(OrigLoop); 4120 4121 // If we inserted an edge from the middle block to the unique exit block, 4122 // update uses outside the loop (phis) to account for the newly inserted 4123 // edge. 4124 if (!Cost->requiresScalarEpilogue(VF)) { 4125 // Fix-up external users of the induction variables. 4126 for (auto &Entry : Legal->getInductionVars()) 4127 fixupIVUsers(Entry.first, Entry.second, 4128 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4129 IVEndValues[Entry.first], LoopMiddleBlock); 4130 4131 fixLCSSAPHIs(State); 4132 } 4133 4134 for (Instruction *PI : PredicatedInstructions) 4135 sinkScalarOperands(&*PI); 4136 4137 // Remove redundant induction instructions. 
4138 cse(LoopVectorBody); 4139 4140 // Set/update profile weights for the vector and remainder loops as original 4141 // loop iterations are now distributed among them. Note that original loop 4142 // represented by LoopScalarBody becomes remainder loop after vectorization. 4143 // 4144 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 4145 // end up with a slightly roughened result, but that should be OK since the 4146 // profile is not inherently precise anyway. Note also that a possible bypass 4147 // of the vector code caused by legality checks is ignored, optimistically 4148 // assigning all the weight to the vector loop. 4149 // 4150 // For scalable vectorization we can't know at compile time how many iterations 4151 // of the loop are handled in one vector iteration, so instead assume a pessimistic 4152 // vscale of '1'. 4153 setProfileInfoAfterUnrolling( 4154 LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody), 4155 LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF); 4156 } 4157 4158 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 4159 // In order to support recurrences we need to be able to vectorize Phi nodes. 4160 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4161 // stage #2: We now need to fix the recurrences by adding incoming edges to 4162 // the currently empty PHI nodes. At this point every instruction in the 4163 // original loop is widened to a vector form so we can use them to construct 4164 // the incoming edges. 4165 VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock(); 4166 for (VPRecipeBase &R : Header->phis()) { 4167 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 4168 fixReduction(ReductionPhi, State); 4169 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 4170 fixFirstOrderRecurrence(FOR, State); 4171 } 4172 } 4173 4174 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, 4175 VPTransformState &State) { 4176 // This is the second phase of vectorizing first-order recurrences. An 4177 // overview of the transformation is described below. Suppose we have the 4178 // following loop. 4179 // 4180 // for (int i = 0; i < n; ++i) 4181 // b[i] = a[i] - a[i - 1]; 4182 // 4183 // There is a first-order recurrence on "a". For this loop, the shorthand 4184 // scalar IR looks like: 4185 // 4186 // scalar.ph: 4187 // s_init = a[-1] 4188 // br scalar.body 4189 // 4190 // scalar.body: 4191 // i = phi [0, scalar.ph], [i+1, scalar.body] 4192 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 4193 // s2 = a[i] 4194 // b[i] = s2 - s1 4195 // br cond, scalar.body, ... 4196 // 4197 // In this example, s1 is a recurrence because its value depends on the 4198 // previous iteration. In the first phase of vectorization, we created a 4199 // vector phi v1 for s1. We now complete the vectorization and produce the 4200 // shorthand vector IR shown below (for VF = 4, UF = 1).
4201 // 4202 // vector.ph: 4203 // v_init = vector(..., ..., ..., a[-1]) 4204 // br vector.body 4205 // 4206 // vector.body 4207 // i = phi [0, vector.ph], [i+4, vector.body] 4208 // v1 = phi [v_init, vector.ph], [v2, vector.body] 4209 // v2 = a[i, i+1, i+2, i+3]; 4210 // v3 = vector(v1(3), v2(0, 1, 2)) 4211 // b[i, i+1, i+2, i+3] = v2 - v3 4212 // br cond, vector.body, middle.block 4213 // 4214 // middle.block: 4215 // x = v2(3) 4216 // br scalar.ph 4217 // 4218 // scalar.ph: 4219 // s_init = phi [x, middle.block], [a[-1], otherwise] 4220 // br scalar.body 4221 // 4222 // After execution completes the vector loop, we extract the next value of 4223 // the recurrence (x) to use as the initial value in the scalar loop. 4224 4225 auto *IdxTy = Builder.getInt32Ty(); 4226 auto *VecPhi = cast<PHINode>(State.get(PhiR, 0)); 4227 4228 // Fix the latch value of the new recurrence in the vector loop. 4229 VPValue *PreviousDef = PhiR->getBackedgeValue(); 4230 Value *Incoming = State.get(PreviousDef, UF - 1); 4231 VecPhi->addIncoming(Incoming, LI->getLoopFor(LoopVectorBody)->getLoopLatch()); 4232 4233 // Extract the last vector element in the middle block. This will be the 4234 // initial value for the recurrence when jumping to the scalar loop. 4235 auto *ExtractForScalar = Incoming; 4236 if (VF.isVector()) { 4237 auto *One = ConstantInt::get(IdxTy, 1); 4238 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4239 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4240 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 4241 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 4242 "vector.recur.extract"); 4243 } 4244 // Extract the second last element in the middle block if the 4245 // Phi is used outside the loop. We need to extract the phi itself 4246 // and not the last element (the phi update in the current iteration). This 4247 // will be the value when jumping to the exit block from the LoopMiddleBlock, 4248 // when the scalar loop is not run at all. 4249 Value *ExtractForPhiUsedOutsideLoop = nullptr; 4250 if (VF.isVector()) { 4251 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 4252 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 4253 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 4254 Incoming, Idx, "vector.recur.extract.for.phi"); 4255 } else if (UF > 1) 4256 // When loop is unrolled without vectorizing, initialize 4257 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 4258 // of `Incoming`. This is analogous to the vectorized case above: extracting 4259 // the second last element when VF > 1. 4260 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 4261 4262 // Fix the initial value of the original recurrence in the scalar loop. 4263 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 4264 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 4265 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 4266 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 4267 for (auto *BB : predecessors(LoopScalarPreHeader)) { 4268 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 4269 Start->addIncoming(Incoming, BB); 4270 } 4271 4272 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 4273 Phi->setName("scalar.recur"); 4274 4275 // Finally, fix users of the recurrence outside the loop. 
The users will need 4276 // either the last value of the scalar recurrence or the last value of the 4277 // vector recurrence we extracted in the middle block. Since the loop is in 4278 // LCSSA form, we just need to find all the phi nodes for the original scalar 4279 // recurrence in the exit block, and then add an edge for the middle block. 4280 // Note that LCSSA does not imply single entry when the original scalar loop 4281 // had multiple exiting edges (as we always run the last iteration in the 4282 // scalar epilogue); in that case, there is no edge from middle to exit and 4283 // thus no phis which need to be updated. 4284 if (!Cost->requiresScalarEpilogue(VF)) 4285 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4286 if (any_of(LCSSAPhi.incoming_values(), 4287 [Phi](Value *V) { return V == Phi; })) 4288 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 4289 } 4290 4291 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 4292 VPTransformState &State) { 4293 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 4294 // Get its reduction variable descriptor. 4295 assert(Legal->isReductionVariable(OrigPhi) && 4296 "Unable to find the reduction variable"); 4297 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 4298 4299 RecurKind RK = RdxDesc.getRecurrenceKind(); 4300 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 4301 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 4302 setDebugLocFromInst(ReductionStartValue); 4303 4304 VPValue *LoopExitInstDef = State.Plan->getVPValue(LoopExitInst); 4305 // This is the vector-clone of the value that leaves the loop. 4306 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 4307 4308 // Wrap flags are in general invalid after vectorization, clear them. 4309 clearReductionWrapFlags(RdxDesc, State); 4310 4311 // Fix the vector-loop phi. 4312 4313 // Reductions do not have to start at zero. They can start with 4314 // any loop invariant values. 4315 BasicBlock *VectorLoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4316 4317 unsigned LastPartForNewPhi = PhiR->isOrdered() ? 1 : UF; 4318 for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { 4319 Value *VecRdxPhi = State.get(PhiR->getVPSingleValue(), Part); 4320 Value *Val = State.get(PhiR->getBackedgeValue(), Part); 4321 if (PhiR->isOrdered()) 4322 Val = State.get(PhiR->getBackedgeValue(), UF - 1); 4323 4324 cast<PHINode>(VecRdxPhi)->addIncoming(Val, VectorLoopLatch); 4325 } 4326 4327 // Before each round, move the insertion point right between 4328 // the PHIs and the values we are going to write. 4329 // This allows us to write both PHINodes and the extractelement 4330 // instructions. 4331 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4332 4333 setDebugLocFromInst(LoopExitInst); 4334 4335 Type *PhiTy = OrigPhi->getType(); 4336 // If the tail is folded by masking, the vector value to leave the loop should 4337 // be a Select choosing between the vectorized LoopExitInst and the vectorized 4338 // Phi, instead of the former. For an inloop reduction the reduction will 4339 // already be predicated, and does not need to be handled here.
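// As an illustration (shorthand IR with made-up names, assuming a simple
// tail-folded add reduction), the vector loop body contains something like
//
//   %rdx      = phi <4 x i32> ...
//   %rdx.next = add <4 x i32> %rdx, %val
//   %rdx.sel  = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx
//
// and it is %rdx.sel, not %rdx.next, that has to feed the final reduction in
// the middle block; the code below locates that select among the users of the
// vectorized loop-exit instruction.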
4340 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 4341 for (unsigned Part = 0; Part < UF; ++Part) { 4342 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 4343 Value *Sel = nullptr; 4344 for (User *U : VecLoopExitInst->users()) { 4345 if (isa<SelectInst>(U)) { 4346 assert(!Sel && "Reduction exit feeding two selects"); 4347 Sel = U; 4348 } else 4349 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 4350 } 4351 assert(Sel && "Reduction exit feeds no select"); 4352 State.reset(LoopExitInstDef, Sel, Part); 4353 4354 // If the target can create a predicated operator for the reduction at no 4355 // extra cost in the loop (for example a predicated vadd), it can be 4356 // cheaper for the select to remain in the loop than be sunk out of it, 4357 // and so use the select value for the phi instead of the old 4358 // LoopExitValue. 4359 if (PreferPredicatedReductionSelect || 4360 TTI->preferPredicatedReductionSelect( 4361 RdxDesc.getOpcode(), PhiTy, 4362 TargetTransformInfo::ReductionFlags())) { 4363 auto *VecRdxPhi = 4364 cast<PHINode>(State.get(PhiR->getVPSingleValue(), Part)); 4365 VecRdxPhi->setIncomingValueForBlock( 4366 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4367 } 4368 } 4369 } 4370 4371 // If the vector reduction can be performed in a smaller type, we truncate 4372 // then extend the loop exit value to enable InstCombine to evaluate the 4373 // entire expression in the smaller type. 4374 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4375 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4376 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4377 Builder.SetInsertPoint( 4378 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4379 VectorParts RdxParts(UF); 4380 for (unsigned Part = 0; Part < UF; ++Part) { 4381 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4382 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4383 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4384 : Builder.CreateZExt(Trunc, VecTy); 4385 for (Value::user_iterator UI = RdxParts[Part]->user_begin(); 4386 UI != RdxParts[Part]->user_end();) 4387 if (*UI != Trunc) { 4388 (*UI++)->replaceUsesOfWith(RdxParts[Part], Extnd); 4389 RdxParts[Part] = Extnd; 4390 } else { 4391 ++UI; 4392 } 4393 } 4394 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4395 for (unsigned Part = 0; Part < UF; ++Part) { 4396 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4397 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4398 } 4399 } 4400 4401 // Reduce all of the unrolled parts into a single vector. 4402 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4403 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4404 4405 // The middle block terminator has already been assigned a DebugLoc here (the 4406 // OrigLoop's single latch terminator). We want the whole middle block to 4407 // appear to execute on this line because: (a) it is all compiler generated, 4408 // (b) these instructions are always executed after evaluating the latch 4409 // conditional branch, and (c) other passes may add new predecessors which 4410 // terminate on this line. This is the easiest way to ensure we don't 4411 // accidentally cause an extra step back into the loop while debugging. 
4412 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4413 if (PhiR->isOrdered()) 4414 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4415 else { 4416 // Floating-point operations should have some FMF to enable the reduction. 4417 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4418 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4419 for (unsigned Part = 1; Part < UF; ++Part) { 4420 Value *RdxPart = State.get(LoopExitInstDef, Part); 4421 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4422 ReducedPartRdx = Builder.CreateBinOp( 4423 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4424 } else { 4425 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4426 } 4427 } 4428 } 4429 4430 // Create the reduction after the loop. Note that inloop reductions create the 4431 // target reduction in the loop using a Reduction recipe. 4432 if (VF.isVector() && !PhiR->isInLoop()) { 4433 ReducedPartRdx = 4434 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx); 4435 // If the reduction can be performed in a smaller type, we need to extend 4436 // the reduction to the wider type before we branch to the original loop. 4437 if (PhiTy != RdxDesc.getRecurrenceType()) 4438 ReducedPartRdx = RdxDesc.isSigned() 4439 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 4440 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 4441 } 4442 4443 // Create a phi node that merges control-flow from the backedge-taken check 4444 // block and the middle block. 4445 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 4446 LoopScalarPreHeader->getTerminator()); 4447 for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I) 4448 BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]); 4449 BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock); 4450 4451 // Now, we need to fix the users of the reduction variable 4452 // inside and outside of the scalar remainder loop. 4453 4454 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4455 // in the exit blocks. See comment on analogous loop in 4456 // fixFirstOrderRecurrence for a more complete explanation of the logic. 4457 if (!Cost->requiresScalarEpilogue(VF)) 4458 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 4459 if (any_of(LCSSAPhi.incoming_values(), 4460 [LoopExitInst](Value *V) { return V == LoopExitInst; })) 4461 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock); 4462 4463 // Fix the scalar loop reduction variable with the incoming reduction sum 4464 // from the vector body and from the backedge value. 4465 int IncomingEdgeBlockIdx = 4466 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch()); 4467 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index"); 4468 // Pick the other block. 4469 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ?
0 : 1); 4470 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi); 4471 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst); 4472 } 4473 4474 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc, 4475 VPTransformState &State) { 4476 RecurKind RK = RdxDesc.getRecurrenceKind(); 4477 if (RK != RecurKind::Add && RK != RecurKind::Mul) 4478 return; 4479 4480 Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr(); 4481 assert(LoopExitInstr && "null loop exit instruction"); 4482 SmallVector<Instruction *, 8> Worklist; 4483 SmallPtrSet<Instruction *, 8> Visited; 4484 Worklist.push_back(LoopExitInstr); 4485 Visited.insert(LoopExitInstr); 4486 4487 while (!Worklist.empty()) { 4488 Instruction *Cur = Worklist.pop_back_val(); 4489 if (isa<OverflowingBinaryOperator>(Cur)) 4490 for (unsigned Part = 0; Part < UF; ++Part) { 4491 Value *V = State.get(State.Plan->getVPValue(Cur), Part); 4492 cast<Instruction>(V)->dropPoisonGeneratingFlags(); 4493 } 4494 4495 for (User *U : Cur->users()) { 4496 Instruction *UI = cast<Instruction>(U); 4497 if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) && 4498 Visited.insert(UI).second) 4499 Worklist.push_back(UI); 4500 } 4501 } 4502 } 4503 4504 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) { 4505 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) { 4506 if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1) 4507 // Some phis were already hand updated by the reduction and recurrence 4508 // code above, leave them alone. 4509 continue; 4510 4511 auto *IncomingValue = LCSSAPhi.getIncomingValue(0); 4512 // Non-instruction incoming values will have only one value. 4513 4514 VPLane Lane = VPLane::getFirstLane(); 4515 if (isa<Instruction>(IncomingValue) && 4516 !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue), 4517 VF)) 4518 Lane = VPLane::getLastLaneForVF(VF); 4519 4520 // Can be a loop invariant incoming value or the last scalar value to be 4521 // extracted from the vectorized loop. 4522 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 4523 Value *lastIncomingValue = 4524 OrigLoop->isLoopInvariant(IncomingValue) 4525 ? IncomingValue 4526 : State.get(State.Plan->getVPValue(IncomingValue), 4527 VPIteration(UF - 1, Lane)); 4528 LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock); 4529 } 4530 } 4531 4532 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) { 4533 // The basic block and loop containing the predicated instruction. 4534 auto *PredBB = PredInst->getParent(); 4535 auto *VectorLoop = LI->getLoopFor(PredBB); 4536 4537 // Initialize a worklist with the operands of the predicated instruction. 4538 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end()); 4539 4540 // Holds instructions that we need to analyze again. An instruction may be 4541 // reanalyzed if we don't yet know if we can sink it or not. 4542 SmallVector<Instruction *, 8> InstsToReanalyze; 4543 4544 // Returns true if a given use occurs in the predicated block. Phi nodes use 4545 // their operands in their corresponding predecessor blocks. 
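// For example (hypothetical values): if %merge = phi [ %v, %pred.block ],
// [ %w, %other.block ] lives after the predicated block, the use of %v is
// attributed to %pred.block, so %v can still be considered for sinking into
// the predicated block even though its user %merge is outside of it.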
4546 auto isBlockOfUsePredicated = [&](Use &U) -> bool { 4547 auto *I = cast<Instruction>(U.getUser()); 4548 BasicBlock *BB = I->getParent(); 4549 if (auto *Phi = dyn_cast<PHINode>(I)) 4550 BB = Phi->getIncomingBlock( 4551 PHINode::getIncomingValueNumForOperand(U.getOperandNo())); 4552 return BB == PredBB; 4553 }; 4554 4555 // Iteratively sink the scalarized operands of the predicated instruction 4556 // into the block we created for it. When an instruction is sunk, its 4557 // operands are then added to the worklist. The algorithm ends once a full 4558 // pass through the worklist does not sink a single instruction. 4559 bool Changed; 4560 do { 4561 // Add the instructions that need to be reanalyzed to the worklist, and 4562 // reset the changed indicator. 4563 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4564 InstsToReanalyze.clear(); 4565 Changed = false; 4566 4567 while (!Worklist.empty()) { 4568 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4569 4570 // We can't sink an instruction if it is a phi node, is not in the loop, 4571 // or may have side effects. 4572 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4573 I->mayHaveSideEffects()) 4574 continue; 4575 4576 // If the instruction is already in PredBB, check if we can sink its 4577 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4578 // sinking the scalar instruction I, hence it appears in PredBB; but it 4579 // may have failed to sink I's operands (recursively), which we try 4580 // (again) here. 4581 if (I->getParent() == PredBB) { 4582 Worklist.insert(I->op_begin(), I->op_end()); 4583 continue; 4584 } 4585 4586 // It's legal to sink the instruction if all its uses occur in the 4587 // predicated block. Otherwise, there's nothing to do yet, and we may 4588 // need to reanalyze the instruction. 4589 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4590 InstsToReanalyze.push_back(I); 4591 continue; 4592 } 4593 4594 // Move the instruction to the beginning of the predicated block, and add 4595 // its operands to the worklist. 4596 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4597 Worklist.insert(I->op_begin(), I->op_end()); 4598 4599 // The sinking may have enabled other instructions to be sunk, so we will 4600 // need to iterate. 4601 Changed = true; 4602 } 4603 } while (Changed); 4604 } 4605 4606 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) { 4607 for (PHINode *OrigPhi : OrigPHIsToFix) { 4608 VPWidenPHIRecipe *VPPhi = 4609 cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi)); 4610 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4611 // Make sure the builder has a valid insert point. 4612 Builder.SetInsertPoint(NewPhi); 4613 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4614 VPValue *Inc = VPPhi->getIncomingValue(i); 4615 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4616 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4617 } 4618 } 4619 } 4620 4621 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) { 4622 return Cost->useOrderedReductions(RdxDesc); 4623 } 4624 4625 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, 4626 VPUser &Operands, unsigned UF, 4627 ElementCount VF, bool IsPtrLoopInvariant, 4628 SmallBitVector &IsIndexLoopInvariant, 4629 VPTransformState &State) { 4630 // Construct a vector GEP by widening the operands of the scalar GEP as 4631 // necessary. We mark the vector GEP 'inbounds' if appropriate.
A GEP 4632 // results in a vector of pointers when at least one operand of the GEP 4633 // is vector-typed. Thus, to keep the representation compact, we only use 4634 // vector-typed operands for loop-varying values. 4635 4636 if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { 4637 // If we are vectorizing, but the GEP has only loop-invariant operands, 4638 // the GEP we build (by only using vector-typed operands for 4639 // loop-varying values) would be a scalar pointer. Thus, to ensure we 4640 // produce a vector of pointers, we need to either arbitrarily pick an 4641 // operand to broadcast, or broadcast a clone of the original GEP. 4642 // Here, we broadcast a clone of the original. 4643 // 4644 // TODO: If at some point we decide to scalarize instructions having 4645 // loop-invariant operands, this special case will no longer be 4646 // required. We would add the scalarization decision to 4647 // collectLoopScalars() and teach getVectorValue() to broadcast 4648 // the lane-zero scalar value. 4649 auto *Clone = Builder.Insert(GEP->clone()); 4650 for (unsigned Part = 0; Part < UF; ++Part) { 4651 Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); 4652 State.set(VPDef, EntryPart, Part); 4653 addMetadata(EntryPart, GEP); 4654 } 4655 } else { 4656 // If the GEP has at least one loop-varying operand, we are sure to 4657 // produce a vector of pointers. But if we are only unrolling, we want 4658 // to produce a scalar GEP for each unroll part. Thus, the GEP we 4659 // produce with the code below will be scalar (if VF == 1) or vector 4660 // (otherwise). Note that for the unroll-only case, we still maintain 4661 // values in the vector mapping with initVector, as we do for other 4662 // instructions. 4663 for (unsigned Part = 0; Part < UF; ++Part) { 4664 // The pointer operand of the new GEP. If it's loop-invariant, we 4665 // won't broadcast it. 4666 auto *Ptr = IsPtrLoopInvariant 4667 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4668 : State.get(Operands.getOperand(0), Part); 4669 4670 // Collect all the indices for the new GEP. If any index is 4671 // loop-invariant, we won't broadcast it. 4672 SmallVector<Value *, 4> Indices; 4673 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4674 VPValue *Operand = Operands.getOperand(I); 4675 if (IsIndexLoopInvariant[I - 1]) 4676 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4677 else 4678 Indices.push_back(State.get(Operand, Part)); 4679 } 4680 4681 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4682 // but it should be a vector, otherwise. 4683 auto *NewGEP = 4684 GEP->isInBounds() 4685 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4686 Indices) 4687 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4688 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4689 "NewGEP is not a pointer vector"); 4690 State.set(VPDef, NewGEP, Part); 4691 addMetadata(NewGEP, GEP); 4692 } 4693 } 4694 } 4695 4696 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4697 VPWidenPHIRecipe *PhiR, 4698 VPTransformState &State) { 4699 PHINode *P = cast<PHINode>(PN); 4700 if (EnableVPlanNativePath) { 4701 // Currently we enter here in the VPlan-native path for non-induction 4702 // PHIs where all control flow is uniform. We simply widen these PHIs. 4703 // Create a vector phi with no operands - the vector phi operands will be 4704 // set at the end of vector code generation. 4705 Type *VecTy = (State.VF.isScalar()) 4706 ? 
PN->getType() 4707 : VectorType::get(PN->getType(), State.VF); 4708 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4709 State.set(PhiR, VecPhi, 0); 4710 OrigPHIsToFix.push_back(P); 4711 4712 return; 4713 } 4714 4715 assert(PN->getParent() == OrigLoop->getHeader() && 4716 "Non-header phis should have been handled elsewhere"); 4717 4718 // In order to support recurrences we need to be able to vectorize Phi nodes. 4719 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4720 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4721 // this value when we vectorize all of the instructions that use the PHI. 4722 4723 assert(!Legal->isReductionVariable(P) && 4724 "reductions should be handled elsewhere"); 4725 4726 setDebugLocFromInst(P); 4727 4728 // This PHINode must be an induction variable. 4729 // Make sure that we know about it. 4730 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4731 4732 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4733 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4734 4735 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4736 // which can be found from the original scalar operations. 4737 switch (II.getKind()) { 4738 case InductionDescriptor::IK_NoInduction: 4739 llvm_unreachable("Unknown induction"); 4740 case InductionDescriptor::IK_IntInduction: 4741 case InductionDescriptor::IK_FpInduction: 4742 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4743 case InductionDescriptor::IK_PtrInduction: { 4744 // Handle the pointer induction variable case. 4745 assert(P->getType()->isPointerTy() && "Unexpected type."); 4746 4747 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4748 // This is the normalized GEP that starts counting at zero. 4749 Value *PtrInd = 4750 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4751 // Determine the number of scalars we need to generate for each unroll 4752 // iteration. If the instruction is uniform, we only need to generate the 4753 // first lane. Otherwise, we generate all VF values. 4754 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4755 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4756 4757 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4758 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4759 if (NeedsVectorIndex) { 4760 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4761 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4762 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4763 } 4764 4765 for (unsigned Part = 0; Part < UF; ++Part) { 4766 Value *PartStart = createStepForVF( 4767 Builder, ConstantInt::get(PtrInd->getType(), Part), VF); 4768 4769 if (NeedsVectorIndex) { 4770 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4771 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4772 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4773 Value *SclrGep = 4774 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4775 SclrGep->setName("next.gep"); 4776 State.set(PhiR, SclrGep, Part); 4777 // We've cached the whole vector, which means we can support the 4778 // extraction of any lane. 
4779 continue; 4780 } 4781 4782 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4783 Value *Idx = Builder.CreateAdd( 4784 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4785 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4786 Value *SclrGep = 4787 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4788 SclrGep->setName("next.gep"); 4789 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4790 } 4791 } 4792 return; 4793 } 4794 assert(isa<SCEVConstant>(II.getStep()) && 4795 "Induction step not a SCEV constant!"); 4796 Type *PhiType = II.getStep()->getType(); 4797 4798 // Build a pointer phi 4799 Value *ScalarStartValue = II.getStartValue(); 4800 Type *ScStValueType = ScalarStartValue->getType(); 4801 PHINode *NewPointerPhi = 4802 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4803 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4804 4805 // A pointer induction, performed by using a gep 4806 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4807 Instruction *InductionLoc = LoopLatch->getTerminator(); 4808 const SCEV *ScalarStep = II.getStep(); 4809 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4810 Value *ScalarStepValue = 4811 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4812 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4813 Value *NumUnrolledElems = 4814 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4815 Value *InductionGEP = GetElementPtrInst::Create( 4816 ScStValueType->getPointerElementType(), NewPointerPhi, 4817 Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 4818 InductionLoc); 4819 NewPointerPhi->addIncoming(InductionGEP, LoopLatch); 4820 4821 // Create UF many actual address geps that use the pointer 4822 // phi as base and a vectorized version of the step value 4823 // (<step*0, ..., step*N>) as offset. 4824 for (unsigned Part = 0; Part < State.UF; ++Part) { 4825 Type *VecPhiType = VectorType::get(PhiType, State.VF); 4826 Value *StartOffsetScalar = 4827 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 4828 Value *StartOffset = 4829 Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 4830 // Create a vector of consecutive numbers from zero to VF. 4831 StartOffset = 4832 Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType)); 4833 4834 Value *GEP = Builder.CreateGEP( 4835 ScStValueType->getPointerElementType(), NewPointerPhi, 4836 Builder.CreateMul( 4837 StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue), 4838 "vector.gep")); 4839 State.set(PhiR, GEP, Part); 4840 } 4841 } 4842 } 4843 } 4844 4845 /// A helper function for checking whether an integer division-related 4846 /// instruction may divide by zero (in which case it must be predicated if 4847 /// executed conditionally in the scalar code). 4848 /// TODO: It may be worthwhile to generalize and check isKnownNonZero(). 4849 /// Non-zero divisors that are non compile-time constants will not be 4850 /// converted into multiplication, so we will still end up scalarizing 4851 /// the division, but can do so w/o predication. 
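/// For example (illustrative C, not from a test case): in
///   for (int i = 0; i < n; ++i)
///     if (c[i]) a[i] = b[i] / d[i];
/// the division must stay predicated after vectorization, since d[i] might be
/// zero in lanes where c[i] is false; "b[i] / 8" has a non-zero compile-time
/// constant divisor and can be scalarized without predication.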
4852 static bool mayDivideByZero(Instruction &I) { 4853 assert((I.getOpcode() == Instruction::UDiv || 4854 I.getOpcode() == Instruction::SDiv || 4855 I.getOpcode() == Instruction::URem || 4856 I.getOpcode() == Instruction::SRem) && 4857 "Unexpected instruction"); 4858 Value *Divisor = I.getOperand(1); 4859 auto *CInt = dyn_cast<ConstantInt>(Divisor); 4860 return !CInt || CInt->isZero(); 4861 } 4862 4863 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def, 4864 VPUser &User, 4865 VPTransformState &State) { 4866 switch (I.getOpcode()) { 4867 case Instruction::Call: 4868 case Instruction::Br: 4869 case Instruction::PHI: 4870 case Instruction::GetElementPtr: 4871 case Instruction::Select: 4872 llvm_unreachable("This instruction is handled by a different recipe."); 4873 case Instruction::UDiv: 4874 case Instruction::SDiv: 4875 case Instruction::SRem: 4876 case Instruction::URem: 4877 case Instruction::Add: 4878 case Instruction::FAdd: 4879 case Instruction::Sub: 4880 case Instruction::FSub: 4881 case Instruction::FNeg: 4882 case Instruction::Mul: 4883 case Instruction::FMul: 4884 case Instruction::FDiv: 4885 case Instruction::FRem: 4886 case Instruction::Shl: 4887 case Instruction::LShr: 4888 case Instruction::AShr: 4889 case Instruction::And: 4890 case Instruction::Or: 4891 case Instruction::Xor: { 4892 // Just widen unops and binops. 4893 setDebugLocFromInst(&I); 4894 4895 for (unsigned Part = 0; Part < UF; ++Part) { 4896 SmallVector<Value *, 2> Ops; 4897 for (VPValue *VPOp : User.operands()) 4898 Ops.push_back(State.get(VPOp, Part)); 4899 4900 Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); 4901 4902 if (auto *VecOp = dyn_cast<Instruction>(V)) 4903 VecOp->copyIRFlags(&I); 4904 4905 // Use this vector value for all users of the original instruction. 4906 State.set(Def, V, Part); 4907 addMetadata(V, &I); 4908 } 4909 4910 break; 4911 } 4912 case Instruction::ICmp: 4913 case Instruction::FCmp: { 4914 // Widen compares. Generate vector compares. 4915 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4916 auto *Cmp = cast<CmpInst>(&I); 4917 setDebugLocFromInst(Cmp); 4918 for (unsigned Part = 0; Part < UF; ++Part) { 4919 Value *A = State.get(User.getOperand(0), Part); 4920 Value *B = State.get(User.getOperand(1), Part); 4921 Value *C = nullptr; 4922 if (FCmp) { 4923 // Propagate fast math flags. 4924 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4925 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4926 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4927 } else { 4928 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4929 } 4930 State.set(Def, C, Part); 4931 addMetadata(C, &I); 4932 } 4933 4934 break; 4935 } 4936 4937 case Instruction::ZExt: 4938 case Instruction::SExt: 4939 case Instruction::FPToUI: 4940 case Instruction::FPToSI: 4941 case Instruction::FPExt: 4942 case Instruction::PtrToInt: 4943 case Instruction::IntToPtr: 4944 case Instruction::SIToFP: 4945 case Instruction::UIToFP: 4946 case Instruction::Trunc: 4947 case Instruction::FPTrunc: 4948 case Instruction::BitCast: { 4949 auto *CI = cast<CastInst>(&I); 4950 setDebugLocFromInst(CI); 4951 4952 /// Vectorize casts. 4953 Type *DestTy = 4954 (VF.isScalar()) ? 
CI->getType() : VectorType::get(CI->getType(), VF); 4955 4956 for (unsigned Part = 0; Part < UF; ++Part) { 4957 Value *A = State.get(User.getOperand(0), Part); 4958 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4959 State.set(Def, Cast, Part); 4960 addMetadata(Cast, &I); 4961 } 4962 break; 4963 } 4964 default: 4965 // This instruction is not vectorized by simple widening. 4966 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4967 llvm_unreachable("Unhandled instruction!"); 4968 } // end of switch. 4969 } 4970 4971 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4972 VPUser &ArgOperands, 4973 VPTransformState &State) { 4974 assert(!isa<DbgInfoIntrinsic>(I) && 4975 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4976 setDebugLocFromInst(&I); 4977 4978 Module *M = I.getParent()->getParent()->getParent(); 4979 auto *CI = cast<CallInst>(&I); 4980 4981 SmallVector<Type *, 4> Tys; 4982 for (Value *ArgOperand : CI->arg_operands()) 4983 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4984 4985 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4986 4987 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4988 // version of the instruction. 4989 // Is it beneficial to perform intrinsic call compared to lib call? 4990 bool NeedToScalarize = false; 4991 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4992 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4993 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4994 assert((UseVectorIntrinsic || !NeedToScalarize) && 4995 "Instruction should be scalarized elsewhere."); 4996 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4997 "Either the intrinsic cost or vector call cost must be valid"); 4998 4999 for (unsigned Part = 0; Part < UF; ++Part) { 5000 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 5001 SmallVector<Value *, 4> Args; 5002 for (auto &I : enumerate(ArgOperands.operands())) { 5003 // Some intrinsics have a scalar argument - don't replace it with a 5004 // vector. 5005 Value *Arg; 5006 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5007 Arg = State.get(I.value(), Part); 5008 else { 5009 Arg = State.get(I.value(), VPIteration(0, 0)); 5010 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5011 TysForDecl.push_back(Arg->getType()); 5012 } 5013 Args.push_back(Arg); 5014 } 5015 5016 Function *VectorF; 5017 if (UseVectorIntrinsic) { 5018 // Use vector version of the intrinsic. 5019 if (VF.isVector()) 5020 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5021 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5022 assert(VectorF && "Can't retrieve vector intrinsic."); 5023 } else { 5024 // Use vector version of the function call. 
5025 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5026 #ifndef NDEBUG 5027 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5028 "Can't create vector function."); 5029 #endif 5030 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5031 } 5032 SmallVector<OperandBundleDef, 1> OpBundles; 5033 CI->getOperandBundlesAsDefs(OpBundles); 5034 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5035 5036 if (isa<FPMathOperator>(V)) 5037 V->copyFastMathFlags(CI); 5038 5039 State.set(Def, V, Part); 5040 addMetadata(V, &I); 5041 } 5042 } 5043 5044 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5045 VPUser &Operands, 5046 bool InvariantCond, 5047 VPTransformState &State) { 5048 setDebugLocFromInst(&I); 5049 5050 // The condition can be loop invariant but still defined inside the 5051 // loop. This means that we can't just use the original 'cond' value. 5052 // We have to take the 'vectorized' value and pick the first lane. 5053 // Instcombine will make this a no-op. 5054 auto *InvarCond = InvariantCond 5055 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5056 : nullptr; 5057 5058 for (unsigned Part = 0; Part < UF; ++Part) { 5059 Value *Cond = 5060 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5061 Value *Op0 = State.get(Operands.getOperand(1), Part); 5062 Value *Op1 = State.get(Operands.getOperand(2), Part); 5063 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5064 State.set(VPDef, Sel, Part); 5065 addMetadata(Sel, &I); 5066 } 5067 } 5068 5069 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5070 // We should not collect Scalars more than once per VF. Right now, this 5071 // function is called from collectUniformsAndScalars(), which already does 5072 // this check. Collecting Scalars for VF=1 does not make any sense. 5073 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5074 "This function should not be visited twice for the same VF"); 5075 5076 SmallSetVector<Instruction *, 8> Worklist; 5077 5078 // These sets are used to seed the analysis with pointers used by memory 5079 // accesses that will remain scalar. 5080 SmallSetVector<Instruction *, 8> ScalarPtrs; 5081 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5082 auto *Latch = TheLoop->getLoopLatch(); 5083 5084 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5085 // The pointer operands of loads and stores will be scalar as long as the 5086 // memory access is not a gather or scatter operation. The value operand of a 5087 // store will remain scalar if the store is scalarized. 5088 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) { 5089 InstWidening WideningDecision = getWideningDecision(MemAccess, VF); 5090 assert(WideningDecision != CM_Unknown && 5091 "Widening decision should be ready at this moment"); 5092 if (auto *Store = dyn_cast<StoreInst>(MemAccess)) 5093 if (Ptr == Store->getValueOperand()) 5094 return WideningDecision == CM_Scalarize; 5095 assert(Ptr == getLoadStorePointerOperand(MemAccess) && 5096 "Ptr is neither a value or pointer operand"); 5097 return WideningDecision != CM_GatherScatter; 5098 }; 5099 5100 // A helper that returns true if the given value is a bitcast or 5101 // getelementptr instruction contained in the loop. 
5102 auto isLoopVaryingBitCastOrGEP = [&](Value *V) { 5103 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) || 5104 isa<GetElementPtrInst>(V)) && 5105 !TheLoop->isLoopInvariant(V); 5106 }; 5107 5108 auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { 5109 if (!isa<PHINode>(Ptr) || 5110 !Legal->getInductionVars().count(cast<PHINode>(Ptr))) 5111 return false; 5112 auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)]; 5113 if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) 5114 return false; 5115 return isScalarUse(MemAccess, Ptr); 5116 }; 5117 5118 // A helper that evaluates a memory access's use of a pointer. If the 5119 // pointer is actually the pointer induction of a loop, it is 5120 // inserted into the Worklist. If the use will be a scalar use, and the 5121 // pointer is only used by memory accesses, we place the pointer in 5122 // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. 5123 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { 5124 if (isScalarPtrInduction(MemAccess, Ptr)) { 5125 Worklist.insert(cast<Instruction>(Ptr)); 5126 LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr 5127 << "\n"); 5128 5129 Instruction *Update = cast<Instruction>( 5130 cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch)); 5131 ScalarPtrs.insert(Update); 5132 return; 5133 } 5134 // We only care about bitcast and getelementptr instructions contained in 5135 // the loop. 5136 if (!isLoopVaryingBitCastOrGEP(Ptr)) 5137 return; 5138 5139 // If the pointer has already been identified as scalar (e.g., if it was 5140 // also identified as uniform), there's nothing to do. 5141 auto *I = cast<Instruction>(Ptr); 5142 if (Worklist.count(I)) 5143 return; 5144 5145 // If all users of the pointer will be memory accesses and scalar, place the 5146 // pointer in ScalarPtrs. Otherwise, place the pointer in 5147 // PossibleNonScalarPtrs. 5148 if (llvm::all_of(I->users(), [&](User *U) { 5149 return (isa<LoadInst>(U) || isa<StoreInst>(U)) && 5150 isScalarUse(cast<Instruction>(U), Ptr); 5151 })) 5152 ScalarPtrs.insert(I); 5153 else 5154 PossibleNonScalarPtrs.insert(I); 5155 }; 5156 5157 // We seed the scalars analysis with two classes of instructions: (1) 5158 // instructions marked uniform-after-vectorization and (2) bitcast, 5159 // getelementptr and (pointer) phi instructions used by memory accesses 5160 // requiring a scalar use. 5161 // 5162 // (1) Add to the worklist all instructions that have been identified as 5163 // uniform-after-vectorization. 5164 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end()); 5165 5166 // (2) Add to the worklist all bitcast and getelementptr instructions used by 5167 // memory accesses requiring a scalar use. The pointer operands of loads and 5168 // stores will be scalar as long as the memory access is not a gather or 5169 // scatter operation. The value operand of a store will remain scalar if the 5170 // store is scalarized.
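// For instance (illustrative only): a getelementptr feeding only a load that
// the cost model decided to widen as a consecutive access ends up in
// ScalarPtrs, because a widened consecutive access consumes a single scalar
// address per unrolled part; had the same load been turned into a gather, the
// getelementptr would need a vector of pointers and would be placed in
// PossibleNonScalarPtrs instead.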
5171 for (auto *BB : TheLoop->blocks()) 5172 for (auto &I : *BB) { 5173 if (auto *Load = dyn_cast<LoadInst>(&I)) { 5174 evaluatePtrUse(Load, Load->getPointerOperand()); 5175 } else if (auto *Store = dyn_cast<StoreInst>(&I)) { 5176 evaluatePtrUse(Store, Store->getPointerOperand()); 5177 evaluatePtrUse(Store, Store->getValueOperand()); 5178 } 5179 } 5180 for (auto *I : ScalarPtrs) 5181 if (!PossibleNonScalarPtrs.count(I)) { 5182 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n"); 5183 Worklist.insert(I); 5184 } 5185 5186 // Insert the forced scalars. 5187 // FIXME: Currently widenPHIInstruction() often creates a dead vector 5188 // induction variable when the PHI user is scalarized. 5189 auto ForcedScalar = ForcedScalars.find(VF); 5190 if (ForcedScalar != ForcedScalars.end()) 5191 for (auto *I : ForcedScalar->second) 5192 Worklist.insert(I); 5193 5194 // Expand the worklist by looking through any bitcasts and getelementptr 5195 // instructions we've already identified as scalar. This is similar to the 5196 // expansion step in collectLoopUniforms(); however, here we're only 5197 // expanding to include additional bitcasts and getelementptr instructions. 5198 unsigned Idx = 0; 5199 while (Idx != Worklist.size()) { 5200 Instruction *Dst = Worklist[Idx++]; 5201 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 5202 continue; 5203 auto *Src = cast<Instruction>(Dst->getOperand(0)); 5204 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 5205 auto *J = cast<Instruction>(U); 5206 return !TheLoop->contains(J) || Worklist.count(J) || 5207 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 5208 isScalarUse(J, Src)); 5209 })) { 5210 Worklist.insert(Src); 5211 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 5212 } 5213 } 5214 5215 // An induction variable will remain scalar if all users of the induction 5216 // variable and induction variable update remain scalar. 5217 for (auto &Induction : Legal->getInductionVars()) { 5218 auto *Ind = Induction.first; 5219 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5220 5221 // If tail-folding is applied, the primary induction variable will be used 5222 // to feed a vector compare. 5223 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 5224 continue; 5225 5226 // Determine if all users of the induction variable are scalar after 5227 // vectorization. 5228 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5229 auto *I = cast<Instruction>(U); 5230 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); 5231 }); 5232 if (!ScalarInd) 5233 continue; 5234 5235 // Determine if all users of the induction variable update instruction are 5236 // scalar after vectorization. 5237 auto ScalarIndUpdate = 5238 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5239 auto *I = cast<Instruction>(U); 5240 return I == Ind || !TheLoop->contains(I) || Worklist.count(I); 5241 }); 5242 if (!ScalarIndUpdate) 5243 continue; 5244 5245 // The induction variable and its update instruction will remain scalar. 
5246 Worklist.insert(Ind); 5247 Worklist.insert(IndUpdate); 5248 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 5249 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 5250 << "\n"); 5251 } 5252 5253 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 5254 } 5255 5256 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const { 5257 if (!blockNeedsPredication(I->getParent())) 5258 return false; 5259 switch(I->getOpcode()) { 5260 default: 5261 break; 5262 case Instruction::Load: 5263 case Instruction::Store: { 5264 if (!Legal->isMaskRequired(I)) 5265 return false; 5266 auto *Ptr = getLoadStorePointerOperand(I); 5267 auto *Ty = getLoadStoreType(I); 5268 const Align Alignment = getLoadStoreAlignment(I); 5269 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 5270 TTI.isLegalMaskedGather(Ty, Alignment)) 5271 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 5272 TTI.isLegalMaskedScatter(Ty, Alignment)); 5273 } 5274 case Instruction::UDiv: 5275 case Instruction::SDiv: 5276 case Instruction::SRem: 5277 case Instruction::URem: 5278 return mayDivideByZero(*I); 5279 } 5280 return false; 5281 } 5282 5283 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 5284 Instruction *I, ElementCount VF) { 5285 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 5286 assert(getWideningDecision(I, VF) == CM_Unknown && 5287 "Decision should not be set yet."); 5288 auto *Group = getInterleavedAccessGroup(I); 5289 assert(Group && "Must have a group."); 5290 5291 // If the instruction's allocated size doesn't equal it's type size, it 5292 // requires padding and will be scalarized. 5293 auto &DL = I->getModule()->getDataLayout(); 5294 auto *ScalarTy = getLoadStoreType(I); 5295 if (hasIrregularType(ScalarTy, DL)) 5296 return false; 5297 5298 // Check if masking is required. 5299 // A Group may need masking for one of two reasons: it resides in a block that 5300 // needs predication, or it was decided to use masking to deal with gaps. 5301 bool PredicatedAccessRequiresMasking = 5302 Legal->blockNeedsPredication(I->getParent()) && Legal->isMaskRequired(I); 5303 bool AccessWithGapsRequiresMasking = 5304 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 5305 if (!PredicatedAccessRequiresMasking && !AccessWithGapsRequiresMasking) 5306 return true; 5307 5308 // If masked interleaving is required, we expect that the user/target had 5309 // enabled it, because otherwise it either wouldn't have been created or 5310 // it should have been invalidated by the CostModel. 5311 assert(useMaskedInterleavedAccesses(TTI) && 5312 "Masked interleave-groups for predicated accesses are not enabled."); 5313 5314 auto *Ty = getLoadStoreType(I); 5315 const Align Alignment = getLoadStoreAlignment(I); 5316 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment) 5317 : TTI.isLegalMaskedStore(Ty, Alignment); 5318 } 5319 5320 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened( 5321 Instruction *I, ElementCount VF) { 5322 // Get and ensure we have a valid memory instruction. 5323 LoadInst *LI = dyn_cast<LoadInst>(I); 5324 StoreInst *SI = dyn_cast<StoreInst>(I); 5325 assert((LI || SI) && "Invalid memory instruction"); 5326 5327 auto *Ptr = getLoadStorePointerOperand(I); 5328 5329 // In order to be widened, the pointer should be consecutive, first of all. 
5330 if (!Legal->isConsecutivePtr(Ptr)) 5331 return false; 5332 5333 // If the instruction is a store located in a predicated block, it will be 5334 // scalarized. 5335 if (isScalarWithPredication(I)) 5336 return false; 5337 5338 // If the instruction's allocated size doesn't equal its type size, it 5339 // requires padding and will be scalarized. 5340 auto &DL = I->getModule()->getDataLayout(); 5341 auto *ScalarTy = LI ? LI->getType() : SI->getValueOperand()->getType(); 5342 if (hasIrregularType(ScalarTy, DL)) 5343 return false; 5344 5345 return true; 5346 } 5347 5348 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) { 5349 // We should not collect Uniforms more than once per VF. Right now, 5350 // this function is called from collectUniformsAndScalars(), which 5351 // already does this check. Collecting Uniforms for VF=1 does not make any 5352 // sense. 5353 5354 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() && 5355 "This function should not be visited twice for the same VF"); 5356 5357 // Visit the list of Uniforms. Even if we do not find any uniform value, we 5358 // will not analyze it again; Uniforms.count(VF) will return 1. 5359 Uniforms[VF].clear(); 5360 5361 // We now know that the loop is vectorizable! 5362 // Collect instructions inside the loop that will remain uniform after 5363 // vectorization. 5364 5365 // Global values, params and instructions outside of current loop are out of 5366 // scope. 5367 auto isOutOfScope = [&](Value *V) -> bool { 5368 Instruction *I = dyn_cast<Instruction>(V); 5369 return (!I || !TheLoop->contains(I)); 5370 }; 5371 5372 SetVector<Instruction *> Worklist; 5373 BasicBlock *Latch = TheLoop->getLoopLatch(); 5374 5375 // Instructions that are scalar with predication must not be considered 5376 // uniform after vectorization, because that would create an erroneous 5377 // replicating region where only a single instance out of VF should be formed. 5378 // TODO: optimize such seldom cases if found important, see PR40816. 5379 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 5380 if (isOutOfScope(I)) { 5381 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 5382 << *I << "\n"); 5383 return; 5384 } 5385 if (isScalarWithPredication(I)) { 5386 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 5387 << *I << "\n"); 5388 return; 5389 } 5390 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 5391 Worklist.insert(I); 5392 }; 5393 5394 // Start with the conditional branch. If the branch condition is an 5395 // instruction contained in the loop that is only used by the branch, it is 5396 // uniform. 5397 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 5398 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 5399 addToWorklistIfAllowed(Cmp); 5400 5401 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 5402 InstWidening WideningDecision = getWideningDecision(I, VF); 5403 assert(WideningDecision != CM_Unknown && 5404 "Widening decision should be ready at this moment"); 5405 5406 // A uniform memory op is itself uniform. We exclude uniform stores 5407 // here as they demand the last lane, not the first one.
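// For example (illustrative): a load from a loop-invariant address reads the
// same location in every scalar iteration, so only the first-lane scalar load
// needs to be generated; a store to such an address must instead keep the
// value of the last lane, which is why uniform stores are excluded here.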
5408 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 5409 assert(WideningDecision == CM_Scalarize); 5410 return true; 5411 } 5412 5413 return (WideningDecision == CM_Widen || 5414 WideningDecision == CM_Widen_Reverse || 5415 WideningDecision == CM_Interleave); 5416 }; 5417 5418 5419 // Returns true if Ptr is the pointer operand of a memory access instruction 5420 // I, and I is known to not require scalarization. 5421 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 5422 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 5423 }; 5424 5425 // Holds a list of values which are known to have at least one uniform use. 5426 // Note that there may be other uses which aren't uniform. A "uniform use" 5427 // here is something which only demands lane 0 of the unrolled iterations; 5428 // it does not imply that all lanes produce the same value (e.g. this is not 5429 // the usual meaning of uniform) 5430 SetVector<Value *> HasUniformUse; 5431 5432 // Scan the loop for instructions which are either a) known to have only 5433 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 5434 for (auto *BB : TheLoop->blocks()) 5435 for (auto &I : *BB) { 5436 // If there's no pointer operand, there's nothing to do. 5437 auto *Ptr = getLoadStorePointerOperand(&I); 5438 if (!Ptr) 5439 continue; 5440 5441 // A uniform memory op is itself uniform. We exclude uniform stores 5442 // here as they demand the last lane, not the first one. 5443 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 5444 addToWorklistIfAllowed(&I); 5445 5446 if (isUniformDecision(&I, VF)) { 5447 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 5448 HasUniformUse.insert(Ptr); 5449 } 5450 } 5451 5452 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 5453 // demanding) users. Since loops are assumed to be in LCSSA form, this 5454 // disallows uses outside the loop as well. 5455 for (auto *V : HasUniformUse) { 5456 if (isOutOfScope(V)) 5457 continue; 5458 auto *I = cast<Instruction>(V); 5459 auto UsersAreMemAccesses = 5460 llvm::all_of(I->users(), [&](User *U) -> bool { 5461 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 5462 }); 5463 if (UsersAreMemAccesses) 5464 addToWorklistIfAllowed(I); 5465 } 5466 5467 // Expand Worklist in topological order: whenever a new instruction 5468 // is added , its users should be already inside Worklist. It ensures 5469 // a uniform instruction will only be used by uniform instructions. 5470 unsigned idx = 0; 5471 while (idx != Worklist.size()) { 5472 Instruction *I = Worklist[idx++]; 5473 5474 for (auto OV : I->operand_values()) { 5475 // isOutOfScope operands cannot be uniform instructions. 5476 if (isOutOfScope(OV)) 5477 continue; 5478 // First order recurrence Phi's should typically be considered 5479 // non-uniform. 5480 auto *OP = dyn_cast<PHINode>(OV); 5481 if (OP && Legal->isFirstOrderRecurrence(OP)) 5482 continue; 5483 // If all the users of the operand are uniform, then add the 5484 // operand into the uniform worklist. 5485 auto *OI = cast<Instruction>(OV); 5486 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 5487 auto *J = cast<Instruction>(U); 5488 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 5489 })) 5490 addToWorklistIfAllowed(OI); 5491 } 5492 } 5493 5494 // For an instruction to be added into Worklist above, all its users inside 5495 // the loop should also be in Worklist. 
However, this condition cannot be 5496 // true for phi nodes that form a cyclic dependence. We must process phi 5497 // nodes separately. An induction variable will remain uniform if all users 5498 // of the induction variable and induction variable update remain uniform. 5499 // The code below handles both pointer and non-pointer induction variables. 5500 for (auto &Induction : Legal->getInductionVars()) { 5501 auto *Ind = Induction.first; 5502 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 5503 5504 // Determine if all users of the induction variable are uniform after 5505 // vectorization. 5506 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 5507 auto *I = cast<Instruction>(U); 5508 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 5509 isVectorizedMemAccessUse(I, Ind); 5510 }); 5511 if (!UniformInd) 5512 continue; 5513 5514 // Determine if all users of the induction variable update instruction are 5515 // uniform after vectorization. 5516 auto UniformIndUpdate = 5517 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 5518 auto *I = cast<Instruction>(U); 5519 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 5520 isVectorizedMemAccessUse(I, IndUpdate); 5521 }); 5522 if (!UniformIndUpdate) 5523 continue; 5524 5525 // The induction variable and its update instruction will remain uniform. 5526 addToWorklistIfAllowed(Ind); 5527 addToWorklistIfAllowed(IndUpdate); 5528 } 5529 5530 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 5531 } 5532 5533 bool LoopVectorizationCostModel::runtimeChecksRequired() { 5534 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 5535 5536 if (Legal->getRuntimePointerChecking()->Need) { 5537 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 5538 "runtime pointer checks needed. Enable vectorization of this " 5539 "loop with '#pragma clang loop vectorize(enable)' when " 5540 "compiling with -Os/-Oz", 5541 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5542 return true; 5543 } 5544 5545 if (!PSE.getUnionPredicate().getPredicates().empty()) { 5546 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 5547 "runtime SCEV checks needed. Enable vectorization of this " 5548 "loop with '#pragma clang loop vectorize(enable)' when " 5549 "compiling with -Os/-Oz", 5550 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5551 return true; 5552 } 5553 5554 // FIXME: Avoid specializing for stride==1 instead of bailing out. 5555 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 5556 reportVectorizationFailure("Runtime stride check for small trip count", 5557 "runtime stride == 1 checks needed. 
Enable vectorization of " 5558 "this loop without such check by compiling with -Os/-Oz", 5559 "CantVersionLoopWithOptForSize", ORE, TheLoop); 5560 return true; 5561 } 5562 5563 return false; 5564 } 5565 5566 ElementCount 5567 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 5568 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 5569 reportVectorizationInfo( 5570 "Disabling scalable vectorization, because target does not " 5571 "support scalable vectors.", 5572 "ScalableVectorsUnsupported", ORE, TheLoop); 5573 return ElementCount::getScalable(0); 5574 } 5575 5576 if (Hints->isScalableVectorizationDisabled()) { 5577 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 5578 "ScalableVectorizationDisabled", ORE, TheLoop); 5579 return ElementCount::getScalable(0); 5580 } 5581 5582 auto MaxScalableVF = ElementCount::getScalable( 5583 std::numeric_limits<ElementCount::ScalarTy>::max()); 5584 5585 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 5586 // FIXME: While for scalable vectors this is currently sufficient, this should 5587 // be replaced by a more detailed mechanism that filters out specific VFs, 5588 // instead of invalidating vectorization for a whole set of VFs based on the 5589 // MaxVF. 5590 5591 // Disable scalable vectorization if the loop contains unsupported reductions. 5592 if (!canVectorizeReductions(MaxScalableVF)) { 5593 reportVectorizationInfo( 5594 "Scalable vectorization not supported for the reduction " 5595 "operations found in this loop.", 5596 "ScalableVFUnfeasible", ORE, TheLoop); 5597 return ElementCount::getScalable(0); 5598 } 5599 5600 // Disable scalable vectorization if the loop contains any instructions 5601 // with element types not supported for scalable vectors. 5602 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 5603 return !Ty->isVoidTy() && 5604 !this->TTI.isElementTypeLegalForScalableVector(Ty); 5605 })) { 5606 reportVectorizationInfo("Scalable vectorization is not supported " 5607 "for all element types found in this loop.", 5608 "ScalableVFUnfeasible", ORE, TheLoop); 5609 return ElementCount::getScalable(0); 5610 } 5611 5612 if (Legal->isSafeForAnyVectorWidth()) 5613 return MaxScalableVF; 5614 5615 // Limit MaxScalableVF by the maximum safe dependence distance. 5616 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 5617 MaxScalableVF = ElementCount::getScalable( 5618 MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0); 5619 if (!MaxScalableVF) 5620 reportVectorizationInfo( 5621 "Max legal vector width too small, scalable vectorization " 5622 "unfeasible.", 5623 "ScalableVFUnfeasible", ORE, TheLoop); 5624 5625 return MaxScalableVF; 5626 } 5627 5628 FixedScalableVFPair 5629 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount, 5630 ElementCount UserVF) { 5631 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 5632 unsigned SmallestType, WidestType; 5633 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 5634 5635 // Get the maximum safe dependence distance in bits computed by LAA. 5636 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 5637 // the memory accesses that is most restrictive (involved in the smallest 5638 // dependence distance). 
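// (Worked example, with hypothetical numbers: if dependence analysis reports a
// maximum safe width of 512 bits and the widest loop type is i32, then
// MaxSafeElements = PowerOf2Floor(512 / 32) = 16, i.e. at most 16 elements may
// be processed per vector iteration without violating the dependence.)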
5639 unsigned MaxSafeElements = 5640 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 5641 5642 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 5643 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 5644 5645 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 5646 << ".\n"); 5647 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 5648 << ".\n"); 5649 5650 // First analyze the UserVF, fall back if the UserVF should be ignored. 5651 if (UserVF) { 5652 auto MaxSafeUserVF = 5653 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 5654 5655 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 5656 // If `VF=vscale x N` is safe, then so is `VF=N` 5657 if (UserVF.isScalable()) 5658 return FixedScalableVFPair( 5659 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 5660 else 5661 return UserVF; 5662 } 5663 5664 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 5665 5666 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 5667 // is better to ignore the hint and let the compiler choose a suitable VF. 5668 if (!UserVF.isScalable()) { 5669 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5670 << " is unsafe, clamping to max safe VF=" 5671 << MaxSafeFixedVF << ".\n"); 5672 ORE->emit([&]() { 5673 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5674 TheLoop->getStartLoc(), 5675 TheLoop->getHeader()) 5676 << "User-specified vectorization factor " 5677 << ore::NV("UserVectorizationFactor", UserVF) 5678 << " is unsafe, clamping to maximum safe vectorization factor " 5679 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 5680 }); 5681 return MaxSafeFixedVF; 5682 } 5683 5684 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 5685 << " is unsafe. Ignoring scalable UserVF.\n"); 5686 ORE->emit([&]() { 5687 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 5688 TheLoop->getStartLoc(), 5689 TheLoop->getHeader()) 5690 << "User-specified vectorization factor " 5691 << ore::NV("UserVectorizationFactor", UserVF) 5692 << " is unsafe. Ignoring the hint to let the compiler pick a " 5693 "suitable VF."; 5694 }); 5695 } 5696 5697 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 5698 << " / " << WidestType << " bits.\n"); 5699 5700 FixedScalableVFPair Result(ElementCount::getFixed(1), 5701 ElementCount::getScalable(0)); 5702 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5703 WidestType, MaxSafeFixedVF)) 5704 Result.FixedVF = MaxVF; 5705 5706 if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType, 5707 WidestType, MaxSafeScalableVF)) 5708 if (MaxVF.isScalable()) { 5709 Result.ScalableVF = MaxVF; 5710 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 5711 << "\n"); 5712 } 5713 5714 return Result; 5715 } 5716 5717 FixedScalableVFPair 5718 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 5719 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 5720 // TODO: It may by useful to do since it's still likely to be dynamically 5721 // uniform if the target can skip. 5722 reportVectorizationFailure( 5723 "Not inserting runtime ptr check for divergent target", 5724 "runtime pointer checks needed. 
Not enabled for divergent target",
5725 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5726 return FixedScalableVFPair::getNone();
5727 }
5728
5729 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5730 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5731 if (TC == 1) {
5732 reportVectorizationFailure("Single iteration (non) loop",
5733 "loop trip count is one, irrelevant for vectorization",
5734 "SingleIterationLoop", ORE, TheLoop);
5735 return FixedScalableVFPair::getNone();
5736 }
5737
5738 switch (ScalarEpilogueStatus) {
5739 case CM_ScalarEpilogueAllowed:
5740 return computeFeasibleMaxVF(TC, UserVF);
5741 case CM_ScalarEpilogueNotAllowedUsePredicate:
5742 LLVM_FALLTHROUGH;
5743 case CM_ScalarEpilogueNotNeededUsePredicate:
5744 LLVM_DEBUG(
5745 dbgs() << "LV: vector predicate hint/switch found.\n"
5746 << "LV: Not allowing scalar epilogue, creating predicated "
5747 << "vector loop.\n");
5748 break;
5749 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5750 // fallthrough as a special case of OptForSize
5751 case CM_ScalarEpilogueNotAllowedOptSize:
5752 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5753 LLVM_DEBUG(
5754 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5755 else
5756 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5757 << "count.\n");
5758
5759 // Bail if runtime checks are required, which are not good when optimising
5760 // for size.
5761 if (runtimeChecksRequired())
5762 return FixedScalableVFPair::getNone();
5763
5764 break;
5765 }
5766
5767 // The only loops we can vectorize without a scalar epilogue are loops with
5768 // a bottom-test and a single exiting block. We'd have to handle the fact
5769 // that not every instruction executes on the last iteration. This will
5770 // require a lane mask which varies through the vector loop body. (TODO)
5771 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5772 // If there was a tail-folding hint/switch, but we can't fold the tail by
5773 // masking, fall back to a vectorization with a scalar epilogue.
5774 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5775 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5776 "scalar epilogue instead.\n");
5777 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5778 return computeFeasibleMaxVF(TC, UserVF);
5779 }
5780 return FixedScalableVFPair::getNone();
5781 }
5782
5783 // Now try the tail folding.
5784
5785 // Invalidate interleave groups that require an epilogue if we can't mask
5786 // the interleave-group.
5787 if (!useMaskedInterleavedAccesses(TTI)) {
5788 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5789 "No decisions should have been taken at this point");
5790 // Note: There is no need to invalidate any cost modeling decisions here, as
5791 // none were taken so far.
5792 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5793 }
5794
5795 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5796 // Avoid tail folding if the trip count is known to be a multiple of any VF
5797 // we chose.
5798 // FIXME: The condition below pessimises the case for fixed-width vectors,
5799 // when scalable VFs are also candidates for vectorization.
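// (Illustrative note: every candidate fixed VF is a power of two no larger than
// MaxFactors.FixedVF, so if SCEV can prove the trip count is a multiple of
// MaxFixedVF * UserIC below, it is also a multiple of whatever VF is finally
// chosen, and no scalar tail remains.)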
5800 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) { 5801 ElementCount MaxFixedVF = MaxFactors.FixedVF; 5802 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) && 5803 "MaxFixedVF must be a power of 2"); 5804 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC 5805 : MaxFixedVF.getFixedValue(); 5806 ScalarEvolution *SE = PSE.getSE(); 5807 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 5808 const SCEV *ExitCount = SE->getAddExpr( 5809 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 5810 const SCEV *Rem = SE->getURemExpr( 5811 SE->applyLoopGuards(ExitCount, TheLoop), 5812 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC)); 5813 if (Rem->isZero()) { 5814 // Accept MaxFixedVF if we do not have a tail. 5815 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n"); 5816 return MaxFactors; 5817 } 5818 } 5819 5820 // For scalable vectors, don't use tail folding as this is currently not yet 5821 // supported. The code is likely to have ended up here if the tripcount is 5822 // low, in which case it makes sense not to use scalable vectors. 5823 if (MaxFactors.ScalableVF.isVector()) 5824 MaxFactors.ScalableVF = ElementCount::getScalable(0); 5825 5826 // If we don't know the precise trip count, or if the trip count that we 5827 // found modulo the vectorization factor is not zero, try to fold the tail 5828 // by masking. 5829 // FIXME: look for a smaller MaxVF that does divide TC rather than masking. 5830 if (Legal->prepareToFoldTailByMasking()) { 5831 FoldTailByMasking = true; 5832 return MaxFactors; 5833 } 5834 5835 // If there was a tail-folding hint/switch, but we can't fold the tail by 5836 // masking, fallback to a vectorization with a scalar epilogue. 5837 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) { 5838 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a " 5839 "scalar epilogue instead.\n"); 5840 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed; 5841 return MaxFactors; 5842 } 5843 5844 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) { 5845 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n"); 5846 return FixedScalableVFPair::getNone(); 5847 } 5848 5849 if (TC == 0) { 5850 reportVectorizationFailure( 5851 "Unable to calculate the loop count due to complex control flow", 5852 "unable to calculate the loop count due to complex control flow", 5853 "UnknownLoopCountComplexCFG", ORE, TheLoop); 5854 return FixedScalableVFPair::getNone(); 5855 } 5856 5857 reportVectorizationFailure( 5858 "Cannot optimize for size and vectorize at the same time.", 5859 "cannot optimize for size and vectorize at the same time. " 5860 "Enable vectorization of this loop with '#pragma clang loop " 5861 "vectorize(enable)' when compiling with -Os/-Oz", 5862 "NoTailLoopWithOptForSize", ORE, TheLoop); 5863 return FixedScalableVFPair::getNone(); 5864 } 5865 5866 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget( 5867 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType, 5868 const ElementCount &MaxSafeVF) { 5869 bool ComputeScalableMaxVF = MaxSafeVF.isScalable(); 5870 TypeSize WidestRegister = TTI.getRegisterBitWidth( 5871 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector 5872 : TargetTransformInfo::RGK_FixedWidthVector); 5873 5874 // Convenience function to return the minimum of two ElementCounts. 
5875 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5876 assert((LHS.isScalable() == RHS.isScalable()) &&
5877 "Scalable flags must match");
5878 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5879 };
5880
5881 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5882 // Note that neither WidestRegister nor WidestType need be a power of 2.
5883 auto MaxVectorElementCount = ElementCount::get(
5884 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5885 ComputeScalableMaxVF);
5886 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5887 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5888 << (MaxVectorElementCount * WidestType) << " bits.\n");
5889
5890 if (!MaxVectorElementCount) {
5891 LLVM_DEBUG(dbgs() << "LV: The target has no "
5892 << (ComputeScalableMaxVF ? "scalable" : "fixed")
5893 << " vector registers.\n");
5894 return ElementCount::getFixed(1);
5895 }
5896
5897 const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5898 if (ConstTripCount &&
5899 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5900 isPowerOf2_32(ConstTripCount)) {
5901 // We need to clamp the VF to be the ConstTripCount. There is no point in
5902 // choosing a higher viable VF as done in the loop below. If
5903 // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5904 // the TC is less than or equal to the known number of lanes.
5905 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5906 << ConstTripCount << "\n");
5907 return TripCountEC;
5908 }
5909
5910 ElementCount MaxVF = MaxVectorElementCount;
5911 if (TTI.shouldMaximizeVectorBandwidth() ||
5912 (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5913 auto MaxVectorElementCountMaxBW = ElementCount::get(
5914 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5915 ComputeScalableMaxVF);
5916 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5917
5918 // Collect all viable vectorization factors larger than the default MaxVF
5919 // (i.e. MaxVectorElementCount).
5920 SmallVector<ElementCount, 8> VFs;
5921 for (ElementCount VS = MaxVectorElementCount * 2;
5922 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5923 VFs.push_back(VS);
5924
5925 // For each VF calculate its register usage.
5926 auto RUs = calculateRegisterUsage(VFs);
5927
5928 // Select the largest VF which doesn't require more registers than existing
5929 // ones.
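// (Illustrative note: the candidate VFs were pushed above in increasing order,
// so walking RUs backwards visits the widest factor first; the first candidate
// whose per-register-class usage fits within the target's register file is
// taken as MaxVF.)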
5930 for (int i = RUs.size() - 1; i >= 0; --i) { 5931 bool Selected = true; 5932 for (auto &pair : RUs[i].MaxLocalUsers) { 5933 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5934 if (pair.second > TargetNumRegisters) 5935 Selected = false; 5936 } 5937 if (Selected) { 5938 MaxVF = VFs[i]; 5939 break; 5940 } 5941 } 5942 if (ElementCount MinVF = 5943 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) { 5944 if (ElementCount::isKnownLT(MaxVF, MinVF)) { 5945 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF 5946 << ") with target's minimum: " << MinVF << '\n'); 5947 MaxVF = MinVF; 5948 } 5949 } 5950 } 5951 return MaxVF; 5952 } 5953 5954 bool LoopVectorizationCostModel::isMoreProfitable( 5955 const VectorizationFactor &A, const VectorizationFactor &B) const { 5956 InstructionCost CostA = A.Cost; 5957 InstructionCost CostB = B.Cost; 5958 5959 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5960 5961 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5962 MaxTripCount) { 5963 // If we are folding the tail and the trip count is a known (possibly small) 5964 // constant, the trip count will be rounded up to an integer number of 5965 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5966 // which we compare directly. When not folding the tail, the total cost will 5967 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5968 // approximated with the per-lane cost below instead of using the tripcount 5969 // as here. 5970 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5971 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5972 return RTCostA < RTCostB; 5973 } 5974 5975 // When set to preferred, for now assume vscale may be larger than 1, so 5976 // that scalable vectorization is slightly favorable over fixed-width 5977 // vectorization. 5978 if (Hints->isScalableVectorizationPreferred()) 5979 if (A.Width.isScalable() && !B.Width.isScalable()) 5980 return (CostA * B.Width.getKnownMinValue()) <= 5981 (CostB * A.Width.getKnownMinValue()); 5982 5983 // To avoid the need for FP division: 5984 // (CostA / A.Width) < (CostB / B.Width) 5985 // <=> (CostA * B.Width) < (CostB * A.Width) 5986 return (CostA * B.Width.getKnownMinValue()) < 5987 (CostB * A.Width.getKnownMinValue()); 5988 } 5989 5990 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5991 const ElementCountSet &VFCandidates) { 5992 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5993 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5994 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5995 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5996 "Expected Scalar VF to be a candidate"); 5997 5998 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost); 5999 VectorizationFactor ChosenFactor = ScalarCost; 6000 6001 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 6002 if (ForceVectorization && VFCandidates.size() > 1) { 6003 // Ignore scalar width, because the user explicitly wants vectorization. 6004 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 6005 // evaluation. 
6006 ChosenFactor.Cost = InstructionCost::getMax(); 6007 } 6008 6009 SmallVector<InstructionVFPair> InvalidCosts; 6010 for (const auto &i : VFCandidates) { 6011 // The cost for scalar VF=1 is already calculated, so ignore it. 6012 if (i.isScalar()) 6013 continue; 6014 6015 VectorizationCostTy C = expectedCost(i, &InvalidCosts); 6016 VectorizationFactor Candidate(i, C.first); 6017 LLVM_DEBUG( 6018 dbgs() << "LV: Vector loop of width " << i << " costs: " 6019 << (Candidate.Cost / Candidate.Width.getKnownMinValue()) 6020 << (i.isScalable() ? " (assuming a minimum vscale of 1)" : "") 6021 << ".\n"); 6022 6023 if (!C.second && !ForceVectorization) { 6024 LLVM_DEBUG( 6025 dbgs() << "LV: Not considering vector loop of width " << i 6026 << " because it will not generate any vector instructions.\n"); 6027 continue; 6028 } 6029 6030 // If profitable add it to ProfitableVF list. 6031 if (isMoreProfitable(Candidate, ScalarCost)) 6032 ProfitableVFs.push_back(Candidate); 6033 6034 if (isMoreProfitable(Candidate, ChosenFactor)) 6035 ChosenFactor = Candidate; 6036 } 6037 6038 // Emit a report of VFs with invalid costs in the loop. 6039 if (!InvalidCosts.empty()) { 6040 // Group the remarks per instruction, keeping the instruction order from 6041 // InvalidCosts. 6042 std::map<Instruction *, unsigned> Numbering; 6043 unsigned I = 0; 6044 for (auto &Pair : InvalidCosts) 6045 if (!Numbering.count(Pair.first)) 6046 Numbering[Pair.first] = I++; 6047 6048 // Sort the list, first on instruction(number) then on VF. 6049 llvm::sort(InvalidCosts, 6050 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) { 6051 if (Numbering[A.first] != Numbering[B.first]) 6052 return Numbering[A.first] < Numbering[B.first]; 6053 ElementCountComparator ECC; 6054 return ECC(A.second, B.second); 6055 }); 6056 6057 // For a list of ordered instruction-vf pairs: 6058 // [(load, vf1), (load, vf2), (store, vf1)] 6059 // Group the instructions together to emit separate remarks for: 6060 // load (vf1, vf2) 6061 // store (vf1) 6062 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts); 6063 auto Subset = ArrayRef<InstructionVFPair>(); 6064 do { 6065 if (Subset.empty()) 6066 Subset = Tail.take_front(1); 6067 6068 Instruction *I = Subset.front().first; 6069 6070 // If the next instruction is different, or if there are no other pairs, 6071 // emit a remark for the collated subset. e.g. 6072 // [(load, vf1), (load, vf2))] 6073 // to emit: 6074 // remark: invalid costs for 'load' at VF=(vf, vf2) 6075 if (Subset == Tail || Tail[Subset.size()].first != I) { 6076 std::string OutString; 6077 raw_string_ostream OS(OutString); 6078 assert(!Subset.empty() && "Unexpected empty range"); 6079 OS << "Instruction with invalid costs prevented vectorization at VF=("; 6080 for (auto &Pair : Subset) 6081 OS << (Pair.second == Subset.front().second ? 
"" : ", ") 6082 << Pair.second; 6083 OS << "):"; 6084 if (auto *CI = dyn_cast<CallInst>(I)) 6085 OS << " call to " << CI->getCalledFunction()->getName(); 6086 else 6087 OS << " " << I->getOpcodeName(); 6088 OS.flush(); 6089 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 6090 Tail = Tail.drop_front(Subset.size()); 6091 Subset = {}; 6092 } else 6093 // Grow the subset by one element 6094 Subset = Tail.take_front(Subset.size() + 1); 6095 } while (!Tail.empty()); 6096 } 6097 6098 if (!EnableCondStoresVectorization && NumPredStores) { 6099 reportVectorizationFailure("There are conditional stores.", 6100 "store that is conditionally executed prevents vectorization", 6101 "ConditionalStore", ORE, TheLoop); 6102 ChosenFactor = ScalarCost; 6103 } 6104 6105 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 6106 ChosenFactor.Cost >= ScalarCost.Cost) dbgs() 6107 << "LV: Vectorization seems to be not beneficial, " 6108 << "but was forced by a user.\n"); 6109 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 6110 return ChosenFactor; 6111 } 6112 6113 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 6114 const Loop &L, ElementCount VF) const { 6115 // Cross iteration phis such as reductions need special handling and are 6116 // currently unsupported. 6117 if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) { 6118 return Legal->isFirstOrderRecurrence(&Phi) || 6119 Legal->isReductionVariable(&Phi); 6120 })) 6121 return false; 6122 6123 // Phis with uses outside of the loop require special handling and are 6124 // currently unsupported. 6125 for (auto &Entry : Legal->getInductionVars()) { 6126 // Look for uses of the value of the induction at the last iteration. 6127 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 6128 for (User *U : PostInc->users()) 6129 if (!L.contains(cast<Instruction>(U))) 6130 return false; 6131 // Look for uses of penultimate value of the induction. 6132 for (User *U : Entry.first->users()) 6133 if (!L.contains(cast<Instruction>(U))) 6134 return false; 6135 } 6136 6137 // Induction variables that are widened require special handling that is 6138 // currently not supported. 6139 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 6140 return !(this->isScalarAfterVectorization(Entry.first, VF) || 6141 this->isProfitableToScalarize(Entry.first, VF)); 6142 })) 6143 return false; 6144 6145 // Epilogue vectorization code has not been auditted to ensure it handles 6146 // non-latch exits properly. It may be fine, but it needs auditted and 6147 // tested. 6148 if (L.getExitingBlock() != L.getLoopLatch()) 6149 return false; 6150 6151 return true; 6152 } 6153 6154 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 6155 const ElementCount VF) const { 6156 // FIXME: We need a much better cost-model to take different parameters such 6157 // as register pressure, code size increase and cost of extra branches into 6158 // account. For now we apply a very crude heuristic and only consider loops 6159 // with vectorization factors larger than a certain value. 6160 // We also consider epilogue vectorization unprofitable for targets that don't 6161 // consider interleaving beneficial (eg. MVE). 
6162 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 6163 return false; 6164 if (VF.getFixedValue() >= EpilogueVectorizationMinVF) 6165 return true; 6166 return false; 6167 } 6168 6169 VectorizationFactor 6170 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 6171 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 6172 VectorizationFactor Result = VectorizationFactor::Disabled(); 6173 if (!EnableEpilogueVectorization) { 6174 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 6175 return Result; 6176 } 6177 6178 if (!isScalarEpilogueAllowed()) { 6179 LLVM_DEBUG( 6180 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 6181 "allowed.\n";); 6182 return Result; 6183 } 6184 6185 // FIXME: This can be fixed for scalable vectors later, because at this stage 6186 // the LoopVectorizer will only consider vectorizing a loop with scalable 6187 // vectors when the loop has a hint to enable vectorization for a given VF. 6188 if (MainLoopVF.isScalable()) { 6189 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization for scalable vectors not " 6190 "yet supported.\n"); 6191 return Result; 6192 } 6193 6194 // Not really a cost consideration, but check for unsupported cases here to 6195 // simplify the logic. 6196 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 6197 LLVM_DEBUG( 6198 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 6199 "not a supported candidate.\n";); 6200 return Result; 6201 } 6202 6203 if (EpilogueVectorizationForceVF > 1) { 6204 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 6205 if (LVP.hasPlanWithVFs( 6206 {MainLoopVF, ElementCount::getFixed(EpilogueVectorizationForceVF)})) 6207 return {ElementCount::getFixed(EpilogueVectorizationForceVF), 0}; 6208 else { 6209 LLVM_DEBUG( 6210 dbgs() 6211 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 6212 return Result; 6213 } 6214 } 6215 6216 if (TheLoop->getHeader()->getParent()->hasOptSize() || 6217 TheLoop->getHeader()->getParent()->hasMinSize()) { 6218 LLVM_DEBUG( 6219 dbgs() 6220 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 6221 return Result; 6222 } 6223 6224 if (!isEpilogueVectorizationProfitable(MainLoopVF)) 6225 return Result; 6226 6227 for (auto &NextVF : ProfitableVFs) 6228 if (ElementCount::isKnownLT(NextVF.Width, MainLoopVF) && 6229 (Result.Width.getFixedValue() == 1 || 6230 isMoreProfitable(NextVF, Result)) && 6231 LVP.hasPlanWithVFs({MainLoopVF, NextVF.Width})) 6232 Result = NextVF; 6233 6234 if (Result != VectorizationFactor::Disabled()) 6235 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 6236 << Result.Width.getFixedValue() << "\n";); 6237 return Result; 6238 } 6239 6240 std::pair<unsigned, unsigned> 6241 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 6242 unsigned MinWidth = -1U; 6243 unsigned MaxWidth = 8; 6244 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 6245 for (Type *T : ElementTypesInLoop) { 6246 MinWidth = std::min<unsigned>( 6247 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6248 MaxWidth = std::max<unsigned>( 6249 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 6250 } 6251 return {MinWidth, MaxWidth}; 6252 } 6253 6254 void LoopVectorizationCostModel::collectElementTypesForWidening() { 6255 ElementTypesInLoop.clear(); 6256 // For each block. 6257 for (BasicBlock *BB : TheLoop->blocks()) { 6258 // For each instruction in the loop. 
6259 for (Instruction &I : BB->instructionsWithoutDebug()) {
6260 Type *T = I.getType();
6261
6262 // Skip ignored values.
6263 if (ValuesToIgnore.count(&I))
6264 continue;
6265
6266 // Only examine Loads, Stores and PHINodes.
6267 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6268 continue;
6269
6270 // Examine PHI nodes that are reduction variables. Update the type to
6271 // account for the recurrence type.
6272 if (auto *PN = dyn_cast<PHINode>(&I)) {
6273 if (!Legal->isReductionVariable(PN))
6274 continue;
6275 const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6276 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6277 TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6278 RdxDesc.getRecurrenceType(),
6279 TargetTransformInfo::ReductionFlags()))
6280 continue;
6281 T = RdxDesc.getRecurrenceType();
6282 }
6283
6284 // Examine the stored values.
6285 if (auto *ST = dyn_cast<StoreInst>(&I))
6286 T = ST->getValueOperand()->getType();
6287
6288 // Ignore loaded pointer types and stored pointer types that are not
6289 // vectorizable.
6290 //
6291 // FIXME: The check here attempts to predict whether a load or store will
6292 // be vectorized. We only know this for certain after a VF has
6293 // been selected. Here, we assume that if an access can be
6294 // vectorized, it will be. We should also look at extending this
6295 // optimization to non-pointer types.
6296 //
6297 if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6298 !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6299 continue;
6300
6301 ElementTypesInLoop.insert(T);
6302 }
6303 }
6304 }
6305
6306 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6307 unsigned LoopCost) {
6308 // -- The interleave heuristics --
6309 // We interleave the loop in order to expose ILP and reduce the loop overhead.
6310 // There are many micro-architectural considerations that we can't predict
6311 // at this level. For example, frontend pressure (on decode or fetch) due to
6312 // code size, or the number and capabilities of the execution ports.
6313 //
6314 // We use the following heuristics to select the interleave count:
6315 // 1. If the code has reductions, then we interleave to break the cross
6316 // iteration dependency.
6317 // 2. If the loop is really small, then we interleave to reduce the loop
6318 // overhead.
6319 // 3. We don't interleave if we think that we will spill registers to memory
6320 // due to the increased register pressure.
6321
6322 if (!isScalarEpilogueAllowed())
6323 return 1;
6324
6325 // The maximum safe dependence distance has already been used to limit the
6326 // vectorization factor; do not interleave further in that case.
6326 if (Legal->getMaxSafeDepDistBytes() != -1U)
6327 return 1;
6328
6329 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6330 const bool HasReductions = !Legal->getReductionVars().empty();
6331 // Do not interleave loops with a relatively small known or estimated trip
6332 // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6333 // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6334 // because with the above conditions interleaving can expose ILP and break
6335 // cross iteration dependences for reductions.
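// (Illustrative note: a loop whose known or estimated trip count falls below
// TinyTripCountInterleaveThreshold is left un-interleaved here, unless it stays
// scalar and carries reductions, in which case the extra copies can still hide
// the reduction's cross-iteration latency.)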
6336 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 6337 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 6338 return 1; 6339 6340 RegisterUsage R = calculateRegisterUsage({VF})[0]; 6341 // We divide by these constants so assume that we have at least one 6342 // instruction that uses at least one register. 6343 for (auto& pair : R.MaxLocalUsers) { 6344 pair.second = std::max(pair.second, 1U); 6345 } 6346 6347 // We calculate the interleave count using the following formula. 6348 // Subtract the number of loop invariants from the number of available 6349 // registers. These registers are used by all of the interleaved instances. 6350 // Next, divide the remaining registers by the number of registers that is 6351 // required by the loop, in order to estimate how many parallel instances 6352 // fit without causing spills. All of this is rounded down if necessary to be 6353 // a power of two. We want power of two interleave count to simplify any 6354 // addressing operations or alignment considerations. 6355 // We also want power of two interleave counts to ensure that the induction 6356 // variable of the vector loop wraps to zero, when tail is folded by masking; 6357 // this currently happens when OptForSize, in which case IC is set to 1 above. 6358 unsigned IC = UINT_MAX; 6359 6360 for (auto& pair : R.MaxLocalUsers) { 6361 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 6362 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 6363 << " registers of " 6364 << TTI.getRegisterClassName(pair.first) << " register class\n"); 6365 if (VF.isScalar()) { 6366 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 6367 TargetNumRegisters = ForceTargetNumScalarRegs; 6368 } else { 6369 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 6370 TargetNumRegisters = ForceTargetNumVectorRegs; 6371 } 6372 unsigned MaxLocalUsers = pair.second; 6373 unsigned LoopInvariantRegs = 0; 6374 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 6375 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 6376 6377 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 6378 // Don't count the induction variable as interleaved. 6379 if (EnableIndVarRegisterHeur) { 6380 TmpIC = 6381 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 6382 std::max(1U, (MaxLocalUsers - 1))); 6383 } 6384 6385 IC = std::min(IC, TmpIC); 6386 } 6387 6388 // Clamp the interleave ranges to reasonable counts. 6389 unsigned MaxInterleaveCount = 6390 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 6391 6392 // Check if the user has overridden the max. 6393 if (VF.isScalar()) { 6394 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 6395 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 6396 } else { 6397 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 6398 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 6399 } 6400 6401 // If trip count is known or estimated compile time constant, limit the 6402 // interleave count to be less than the trip count divided by VF, provided it 6403 // is at least 1. 6404 // 6405 // For scalable vectors we can't know if interleaving is beneficial. It may 6406 // not be beneficial for small loops if none of the lanes in the second vector 6407 // iterations is enabled. However, for larger loops, there is likely to be a 6408 // similar benefit as for fixed-width vectors. 
For now, we choose to leave 6409 // the InterleaveCount as if vscale is '1', although if some information about 6410 // the vector is known (e.g. min vector size), we can make a better decision. 6411 if (BestKnownTC) { 6412 MaxInterleaveCount = 6413 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 6414 // Make sure MaxInterleaveCount is greater than 0. 6415 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 6416 } 6417 6418 assert(MaxInterleaveCount > 0 && 6419 "Maximum interleave count must be greater than 0"); 6420 6421 // Clamp the calculated IC to be between the 1 and the max interleave count 6422 // that the target and trip count allows. 6423 if (IC > MaxInterleaveCount) 6424 IC = MaxInterleaveCount; 6425 else 6426 // Make sure IC is greater than 0. 6427 IC = std::max(1u, IC); 6428 6429 assert(IC > 0 && "Interleave count must be greater than 0."); 6430 6431 // If we did not calculate the cost for VF (because the user selected the VF) 6432 // then we calculate the cost of VF here. 6433 if (LoopCost == 0) { 6434 InstructionCost C = expectedCost(VF).first; 6435 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 6436 LoopCost = *C.getValue(); 6437 } 6438 6439 assert(LoopCost && "Non-zero loop cost expected"); 6440 6441 // Interleave if we vectorized this loop and there is a reduction that could 6442 // benefit from interleaving. 6443 if (VF.isVector() && HasReductions) { 6444 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 6445 return IC; 6446 } 6447 6448 // Note that if we've already vectorized the loop we will have done the 6449 // runtime check and so interleaving won't require further checks. 6450 bool InterleavingRequiresRuntimePointerCheck = 6451 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 6452 6453 // We want to interleave small loops in order to reduce the loop overhead and 6454 // potentially expose ILP opportunities. 6455 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 6456 << "LV: IC is " << IC << '\n' 6457 << "LV: VF is " << VF << '\n'); 6458 const bool AggressivelyInterleaveReductions = 6459 TTI.enableAggressiveInterleaving(HasReductions); 6460 if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) { 6461 // We assume that the cost overhead is 1 and we use the cost model 6462 // to estimate the cost of the loop and interleave until the cost of the 6463 // loop overhead is about 5% of the cost of the loop. 6464 unsigned SmallIC = 6465 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 6466 6467 // Interleave until store/load ports (estimated by max interleave count) are 6468 // saturated. 6469 unsigned NumStores = Legal->getNumStores(); 6470 unsigned NumLoads = Legal->getNumLoads(); 6471 unsigned StoresIC = IC / (NumStores ? NumStores : 1); 6472 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1); 6473 6474 // If we have a scalar reduction (vector reductions are already dealt with 6475 // by this point), we can increase the critical path length if the loop 6476 // we're interleaving is inside another loop. For tree-wise reductions 6477 // set the limit to 2, and for ordered reductions it's best to disable 6478 // interleaving entirely. 
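// (Illustrative note: the cap applied below is MaxNestedScalarReductionIC;
// ordered, i.e. in-order floating-point, reductions must be evaluated serially,
// so for them interleaving is abandoned outright.)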
6479 if (HasReductions && TheLoop->getLoopDepth() > 1) {
6480 bool HasOrderedReductions =
6481 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6482 const RecurrenceDescriptor &RdxDesc = Reduction.second;
6483 return RdxDesc.isOrdered();
6484 });
6485 if (HasOrderedReductions) {
6486 LLVM_DEBUG(
6487 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6488 return 1;
6489 }
6490
6491 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6492 SmallIC = std::min(SmallIC, F);
6493 StoresIC = std::min(StoresIC, F);
6494 LoadsIC = std::min(LoadsIC, F);
6495 }
6496
6497 if (EnableLoadStoreRuntimeInterleave &&
6498 std::max(StoresIC, LoadsIC) > SmallIC) {
6499 LLVM_DEBUG(
6500 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6501 return std::max(StoresIC, LoadsIC);
6502 }
6503
6504 // If there are scalar reductions and TTI has enabled aggressive
6505 // interleaving for reductions, we will interleave to expose ILP.
6506 if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6507 AggressivelyInterleaveReductions) {
6508 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6509 // Interleave no less than SmallIC but not as aggressively as the normal IC
6510 // to satisfy the rare situation when resources are too limited.
6511 return std::max(IC / 2, SmallIC);
6512 } else {
6513 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6514 return SmallIC;
6515 }
6516 }
6517
6518 // Interleave if this is a large loop (small loops are already dealt with by
6519 // this point) that could benefit from interleaving.
6520 if (AggressivelyInterleaveReductions) {
6521 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6522 return IC;
6523 }
6524
6525 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6526 return 1;
6527 }
6528
6529 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6530 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6531 // This function calculates the register usage by measuring the highest number
6532 // of values that are alive at a single location. Obviously, this is a very
6533 // rough estimation. We scan the loop in topological order in order to
6534 // assign a number to each instruction. We use RPO to ensure that defs are
6535 // met before their users. We assume that each instruction that has in-loop
6536 // users starts an interval. We record every time that an in-loop value is
6537 // used, so we have a list of the first and last occurrences of each
6538 // instruction. Next, we transpose this data structure into a multi map that
6539 // holds the list of intervals that *end* at a specific location. This multi
6540 // map allows us to perform a linear search. We scan the instructions linearly
6541 // and record each time that a new interval starts, by placing it in a set.
6542 // If we find this value in the multi-map then we remove it from the set.
6543 // The max register usage is the maximum size of the set.
6544 // We also search for instructions that are defined outside the loop, but are
6545 // used inside the loop. We need this number separately from the max-interval
6546 // usage number because when we unroll, loop-invariant values do not take
6547 // more registers.
6548 LoopBlocksDFS DFS(TheLoop);
6549 DFS.perform(LI);
6550
6551 RegisterUsage RU;
6552
6553 // Each 'key' in the map opens a new interval. The values
6554 // of the map are the index of the 'last seen' usage of the
6555 // instruction that is the key.
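// (Worked example, with hypothetical indices: if %a is defined at index 3 and
// its last in-loop use is recorded at index 9, then EndPoint[%a] == 9; %a is
// treated as live over [3, 9] and is dropped from the set of open intervals
// once the linear scan reaches index 9.)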
6556 using IntervalMap = DenseMap<Instruction *, unsigned>; 6557 6558 // Maps instruction to its index. 6559 SmallVector<Instruction *, 64> IdxToInstr; 6560 // Marks the end of each interval. 6561 IntervalMap EndPoint; 6562 // Saves the list of instruction indices that are used in the loop. 6563 SmallPtrSet<Instruction *, 8> Ends; 6564 // Saves the list of values that are used in the loop but are 6565 // defined outside the loop, such as arguments and constants. 6566 SmallPtrSet<Value *, 8> LoopInvariants; 6567 6568 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 6569 for (Instruction &I : BB->instructionsWithoutDebug()) { 6570 IdxToInstr.push_back(&I); 6571 6572 // Save the end location of each USE. 6573 for (Value *U : I.operands()) { 6574 auto *Instr = dyn_cast<Instruction>(U); 6575 6576 // Ignore non-instruction values such as arguments, constants, etc. 6577 if (!Instr) 6578 continue; 6579 6580 // If this instruction is outside the loop then record it and continue. 6581 if (!TheLoop->contains(Instr)) { 6582 LoopInvariants.insert(Instr); 6583 continue; 6584 } 6585 6586 // Overwrite previous end points. 6587 EndPoint[Instr] = IdxToInstr.size(); 6588 Ends.insert(Instr); 6589 } 6590 } 6591 } 6592 6593 // Saves the list of intervals that end with the index in 'key'. 6594 using InstrList = SmallVector<Instruction *, 2>; 6595 DenseMap<unsigned, InstrList> TransposeEnds; 6596 6597 // Transpose the EndPoints to a list of values that end at each index. 6598 for (auto &Interval : EndPoint) 6599 TransposeEnds[Interval.second].push_back(Interval.first); 6600 6601 SmallPtrSet<Instruction *, 8> OpenIntervals; 6602 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 6603 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 6604 6605 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 6606 6607 // A lambda that gets the register usage for the given type and VF. 6608 const auto &TTICapture = TTI; 6609 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 6610 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 6611 return 0; 6612 InstructionCost::CostType RegUsage = 6613 *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue(); 6614 assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() && 6615 "Nonsensical values for register usage."); 6616 return RegUsage; 6617 }; 6618 6619 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 6620 Instruction *I = IdxToInstr[i]; 6621 6622 // Remove all of the instructions that end at this location. 6623 InstrList &List = TransposeEnds[i]; 6624 for (Instruction *ToRemove : List) 6625 OpenIntervals.erase(ToRemove); 6626 6627 // Ignore instructions that are never used within the loop. 6628 if (!Ends.count(I)) 6629 continue; 6630 6631 // Skip ignored values. 6632 if (ValuesToIgnore.count(I)) 6633 continue; 6634 6635 // For each VF find the maximum usage of registers. 6636 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 6637 // Count the number of live intervals. 6638 SmallMapVector<unsigned, unsigned, 4> RegUsage; 6639 6640 if (VFs[j].isScalar()) { 6641 for (auto Inst : OpenIntervals) { 6642 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6643 if (RegUsage.find(ClassID) == RegUsage.end()) 6644 RegUsage[ClassID] = 1; 6645 else 6646 RegUsage[ClassID] += 1; 6647 } 6648 } else { 6649 collectUniformsAndScalars(VFs[j]); 6650 for (auto Inst : OpenIntervals) { 6651 // Skip ignored values for VF > 1. 
6652 if (VecValuesToIgnore.count(Inst)) 6653 continue; 6654 if (isScalarAfterVectorization(Inst, VFs[j])) { 6655 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 6656 if (RegUsage.find(ClassID) == RegUsage.end()) 6657 RegUsage[ClassID] = 1; 6658 else 6659 RegUsage[ClassID] += 1; 6660 } else { 6661 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 6662 if (RegUsage.find(ClassID) == RegUsage.end()) 6663 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 6664 else 6665 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 6666 } 6667 } 6668 } 6669 6670 for (auto& pair : RegUsage) { 6671 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 6672 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 6673 else 6674 MaxUsages[j][pair.first] = pair.second; 6675 } 6676 } 6677 6678 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6679 << OpenIntervals.size() << '\n'); 6680 6681 // Add the current instruction to the list of open intervals. 6682 OpenIntervals.insert(I); 6683 } 6684 6685 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6686 SmallMapVector<unsigned, unsigned, 4> Invariant; 6687 6688 for (auto Inst : LoopInvariants) { 6689 unsigned Usage = 6690 VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]); 6691 unsigned ClassID = 6692 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType()); 6693 if (Invariant.find(ClassID) == Invariant.end()) 6694 Invariant[ClassID] = Usage; 6695 else 6696 Invariant[ClassID] += Usage; 6697 } 6698 6699 LLVM_DEBUG({ 6700 dbgs() << "LV(REG): VF = " << VFs[i] << '\n'; 6701 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size() 6702 << " item\n"; 6703 for (const auto &pair : MaxUsages[i]) { 6704 dbgs() << "LV(REG): RegisterClass: " 6705 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6706 << " registers\n"; 6707 } 6708 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size() 6709 << " item\n"; 6710 for (const auto &pair : Invariant) { 6711 dbgs() << "LV(REG): RegisterClass: " 6712 << TTI.getRegisterClassName(pair.first) << ", " << pair.second 6713 << " registers\n"; 6714 } 6715 }); 6716 6717 RU.LoopInvariantRegs = Invariant; 6718 RU.MaxLocalUsers = MaxUsages[i]; 6719 RUs[i] = RU; 6720 } 6721 6722 return RUs; 6723 } 6724 6725 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){ 6726 // TODO: Cost model for emulated masked load/store is completely 6727 // broken. This hack guides the cost model to use an artificially 6728 // high enough value to practically disable vectorization with such 6729 // operations, except where previously deployed legality hack allowed 6730 // using very low cost values. This is to avoid regressions coming simply 6731 // from moving "masked load/store" check from legality to cost model. 6732 // Masked Load/Gather emulation was previously never allowed. 6733 // Limited number of Masked Store/Scatter emulation was allowed. 6734 assert(isPredicatedInst(I) && 6735 "Expecting a scalar emulated instruction"); 6736 return isa<LoadInst>(I) || 6737 (isa<StoreInst>(I) && 6738 NumPredStores > NumberOfStoresToPredicate); 6739 } 6740 6741 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) { 6742 // If we aren't vectorizing the loop, or if we've already collected the 6743 // instructions to scalarize, there's nothing to do. Collection may already 6744 // have occurred if we have a user-selected VF and are now computing the 6745 // expected cost for interleaving. 
6746 if (VF.isScalar() || VF.isZero() || 6747 InstsToScalarize.find(VF) != InstsToScalarize.end()) 6748 return; 6749 6750 // Initialize a mapping for VF in InstsToScalalarize. If we find that it's 6751 // not profitable to scalarize any instructions, the presence of VF in the 6752 // map will indicate that we've analyzed it already. 6753 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF]; 6754 6755 // Find all the instructions that are scalar with predication in the loop and 6756 // determine if it would be better to not if-convert the blocks they are in. 6757 // If so, we also record the instructions to scalarize. 6758 for (BasicBlock *BB : TheLoop->blocks()) { 6759 if (!blockNeedsPredication(BB)) 6760 continue; 6761 for (Instruction &I : *BB) 6762 if (isScalarWithPredication(&I)) { 6763 ScalarCostsTy ScalarCosts; 6764 // Do not apply discount if scalable, because that would lead to 6765 // invalid scalarization costs. 6766 // Do not apply discount logic if hacked cost is needed 6767 // for emulated masked memrefs. 6768 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) && 6769 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6770 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6771 // Remember that BB will remain after vectorization. 6772 PredicatedBBsAfterVectorization.insert(BB); 6773 } 6774 } 6775 } 6776 6777 int LoopVectorizationCostModel::computePredInstDiscount( 6778 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6779 assert(!isUniformAfterVectorization(PredInst, VF) && 6780 "Instruction marked uniform-after-vectorization will be predicated"); 6781 6782 // Initialize the discount to zero, meaning that the scalar version and the 6783 // vector version cost the same. 6784 InstructionCost Discount = 0; 6785 6786 // Holds instructions to analyze. The instructions we visit are mapped in 6787 // ScalarCosts. Those instructions are the ones that would be scalarized if 6788 // we find that the scalar version costs less. 6789 SmallVector<Instruction *, 8> Worklist; 6790 6791 // Returns true if the given instruction can be scalarized. 6792 auto canBeScalarized = [&](Instruction *I) -> bool { 6793 // We only attempt to scalarize instructions forming a single-use chain 6794 // from the original predicated block that would otherwise be vectorized. 6795 // Although not strictly necessary, we give up on instructions we know will 6796 // already be scalar to avoid traversing chains that are unlikely to be 6797 // beneficial. 6798 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6799 isScalarAfterVectorization(I, VF)) 6800 return false; 6801 6802 // If the instruction is scalar with predication, it will be analyzed 6803 // separately. We ignore it within the context of PredInst. 6804 if (isScalarWithPredication(I)) 6805 return false; 6806 6807 // If any of the instruction's operands are uniform after vectorization, 6808 // the instruction cannot be scalarized. This prevents, for example, a 6809 // masked load from being scalarized. 6810 // 6811 // We assume we will only emit a value for lane zero of an instruction 6812 // marked uniform after vectorization, rather than VF identical values. 6813 // Thus, if we scalarize an instruction that uses a uniform, we would 6814 // create uses of values corresponding to the lanes we aren't emitting code 6815 // for. This behavior can be changed by allowing getScalarValue to clone 6816 // the lane zero values for uniforms rather than asserting. 
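// (Illustrative note: if, say, a candidate's address operand is uniform after
// vectorization, only its lane-0 value will exist in the vector loop;
// scalarizing the candidate would then require per-lane values that are never
// emitted, so such chains are rejected by the loop below.)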
6817 for (Use &U : I->operands()) 6818 if (auto *J = dyn_cast<Instruction>(U.get())) 6819 if (isUniformAfterVectorization(J, VF)) 6820 return false; 6821 6822 // Otherwise, we can scalarize the instruction. 6823 return true; 6824 }; 6825 6826 // Compute the expected cost discount from scalarizing the entire expression 6827 // feeding the predicated instruction. We currently only consider expressions 6828 // that are single-use instruction chains. 6829 Worklist.push_back(PredInst); 6830 while (!Worklist.empty()) { 6831 Instruction *I = Worklist.pop_back_val(); 6832 6833 // If we've already analyzed the instruction, there's nothing to do. 6834 if (ScalarCosts.find(I) != ScalarCosts.end()) 6835 continue; 6836 6837 // Compute the cost of the vector instruction. Note that this cost already 6838 // includes the scalarization overhead of the predicated instruction. 6839 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6840 6841 // Compute the cost of the scalarized instruction. This cost is the cost of 6842 // the instruction as if it wasn't if-converted and instead remained in the 6843 // predicated block. We will scale this cost by block probability after 6844 // computing the scalarization overhead. 6845 InstructionCost ScalarCost = 6846 VF.getFixedValue() * 6847 getInstructionCost(I, ElementCount::getFixed(1)).first; 6848 6849 // Compute the scalarization overhead of needed insertelement instructions 6850 // and phi nodes. 6851 if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) { 6852 ScalarCost += TTI.getScalarizationOverhead( 6853 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6854 APInt::getAllOnesValue(VF.getFixedValue()), true, false); 6855 ScalarCost += 6856 VF.getFixedValue() * 6857 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6858 } 6859 6860 // Compute the scalarization overhead of needed extractelement 6861 // instructions. For each of the instruction's operands, if the operand can 6862 // be scalarized, add it to the worklist; otherwise, account for the 6863 // overhead. 6864 for (Use &U : I->operands()) 6865 if (auto *J = dyn_cast<Instruction>(U.get())) { 6866 assert(VectorType::isValidElementType(J->getType()) && 6867 "Instruction has non-scalar type"); 6868 if (canBeScalarized(J)) 6869 Worklist.push_back(J); 6870 else if (needsExtract(J, VF)) { 6871 ScalarCost += TTI.getScalarizationOverhead( 6872 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6873 APInt::getAllOnesValue(VF.getFixedValue()), false, true); 6874 } 6875 } 6876 6877 // Scale the total scalar cost by block probability. 6878 ScalarCost /= getReciprocalPredBlockProb(); 6879 6880 // Compute the discount. A non-negative discount means the vector version 6881 // of the instruction costs more, and scalarizing would be beneficial. 6882 Discount += VectorCost - ScalarCost; 6883 ScalarCosts[I] = ScalarCost; 6884 } 6885 6886 return *Discount.getValue(); 6887 } 6888 6889 LoopVectorizationCostModel::VectorizationCostTy 6890 LoopVectorizationCostModel::expectedCost( 6891 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6892 VectorizationCostTy Cost; 6893 6894 // For each block. 6895 for (BasicBlock *BB : TheLoop->blocks()) { 6896 VectorizationCostTy BlockCost; 6897 6898 // For each instruction in the old loop. 6899 for (Instruction &I : BB->instructionsWithoutDebug()) { 6900 // Skip ignored values. 
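// (ValuesToIgnore holds, e.g., ephemeral values that only feed llvm.assume;
// VecValuesToIgnore additionally holds the induction/reduction cast
// instructions gathered by collectValuesToIgnore further below.)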
6901 if (ValuesToIgnore.count(&I) || 6902 (VF.isVector() && VecValuesToIgnore.count(&I))) 6903 continue; 6904 6905 VectorizationCostTy C = getInstructionCost(&I, VF); 6906 6907 // Check if we should override the cost. 6908 if (C.first.isValid() && 6909 ForceTargetInstructionCost.getNumOccurrences() > 0) 6910 C.first = InstructionCost(ForceTargetInstructionCost); 6911 6912 // Keep a list of instructions with invalid costs. 6913 if (Invalid && !C.first.isValid()) 6914 Invalid->emplace_back(&I, VF); 6915 6916 BlockCost.first += C.first; 6917 BlockCost.second |= C.second; 6918 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6919 << " for VF " << VF << " For instruction: " << I 6920 << '\n'); 6921 } 6922 6923 // If we are vectorizing a predicated block, it will have been 6924 // if-converted. This means that the block's instructions (aside from 6925 // stores and instructions that may divide by zero) will now be 6926 // unconditionally executed. For the scalar case, we may not always execute 6927 // the predicated block, if it is an if-else block. Thus, scale the block's 6928 // cost by the probability of executing it. blockNeedsPredication from 6929 // Legal is used so as to not include all blocks in tail folded loops. 6930 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6931 BlockCost.first /= getReciprocalPredBlockProb(); 6932 6933 Cost.first += BlockCost.first; 6934 Cost.second |= BlockCost.second; 6935 } 6936 6937 return Cost; 6938 } 6939 6940 /// Gets Address Access SCEV after verifying that the access pattern 6941 /// is loop invariant except the induction variable dependence. 6942 /// 6943 /// This SCEV can be sent to the Target in order to estimate the address 6944 /// calculation cost. 6945 static const SCEV *getAddressAccessSCEV( 6946 Value *Ptr, 6947 LoopVectorizationLegality *Legal, 6948 PredicatedScalarEvolution &PSE, 6949 const Loop *TheLoop) { 6950 6951 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6952 if (!Gep) 6953 return nullptr; 6954 6955 // We are looking for a gep with all loop invariant indices except for one 6956 // which should be an induction variable. 6957 auto SE = PSE.getSE(); 6958 unsigned NumOperands = Gep->getNumOperands(); 6959 for (unsigned i = 1; i < NumOperands; ++i) { 6960 Value *Opd = Gep->getOperand(i); 6961 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6962 !Legal->isInductionVariable(Opd)) 6963 return nullptr; 6964 } 6965 6966 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6967 return PSE.getSCEV(Ptr); 6968 } 6969 6970 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6971 return Legal->hasStride(I->getOperand(0)) || 6972 Legal->hasStride(I->getOperand(1)); 6973 } 6974 6975 InstructionCost 6976 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6977 ElementCount VF) { 6978 assert(VF.isVector() && 6979 "Scalarization cost of instruction implies vectorization."); 6980 if (VF.isScalable()) 6981 return InstructionCost::getInvalid(); 6982 6983 Type *ValTy = getLoadStoreType(I); 6984 auto SE = PSE.getSE(); 6985 6986 unsigned AS = getLoadStoreAddressSpace(I); 6987 Value *Ptr = getLoadStorePointerOperand(I); 6988 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6989 6990 // Figure out whether the access is strided and get the stride value 6991 // if it's known in compile time 6992 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6993 6994 // Get the cost of the scalar memory instruction and address computation. 
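// Rough worked example (all costs hypothetical): with VF = 4, an address
// computation cost of 1 and a scalar load cost of 4, the code below
// accumulates 4 * 1 + 4 * 4 = 20, then adds extract/insert overhead, and
// for predicated accesses scales the result by the block probability and
// adds the i1 extract and branch costs.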
6995 InstructionCost Cost = 6996 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6997 6998 // Don't pass *I here, since it is scalar but will actually be part of a 6999 // vectorized loop where the user of it is a vectorized instruction. 7000 const Align Alignment = getLoadStoreAlignment(I); 7001 Cost += VF.getKnownMinValue() * 7002 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 7003 AS, TTI::TCK_RecipThroughput); 7004 7005 // Get the overhead of the extractelement and insertelement instructions 7006 // we might create due to scalarization. 7007 Cost += getScalarizationOverhead(I, VF); 7008 7009 // If we have a predicated load/store, it will need extra i1 extracts and 7010 // conditional branches, but may not be executed for each vector lane. Scale 7011 // the cost by the probability of executing the predicated block. 7012 if (isPredicatedInst(I)) { 7013 Cost /= getReciprocalPredBlockProb(); 7014 7015 // Add the cost of an i1 extract and a branch 7016 auto *Vec_i1Ty = 7017 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 7018 Cost += TTI.getScalarizationOverhead( 7019 Vec_i1Ty, APInt::getAllOnesValue(VF.getKnownMinValue()), 7020 /*Insert=*/false, /*Extract=*/true); 7021 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 7022 7023 if (useEmulatedMaskMemRefHack(I)) 7024 // Artificially setting to a high enough value to practically disable 7025 // vectorization with such operations. 7026 Cost = 3000000; 7027 } 7028 7029 return Cost; 7030 } 7031 7032 InstructionCost 7033 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 7034 ElementCount VF) { 7035 Type *ValTy = getLoadStoreType(I); 7036 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7037 Value *Ptr = getLoadStorePointerOperand(I); 7038 unsigned AS = getLoadStoreAddressSpace(I); 7039 int ConsecutiveStride = Legal->isConsecutivePtr(Ptr); 7040 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7041 7042 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7043 "Stride should be 1 or -1 for consecutive memory access"); 7044 const Align Alignment = getLoadStoreAlignment(I); 7045 InstructionCost Cost = 0; 7046 if (Legal->isMaskRequired(I)) 7047 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7048 CostKind); 7049 else 7050 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 7051 CostKind, I); 7052 7053 bool Reverse = ConsecutiveStride < 0; 7054 if (Reverse) 7055 Cost += 7056 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7057 return Cost; 7058 } 7059 7060 InstructionCost 7061 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 7062 ElementCount VF) { 7063 assert(Legal->isUniformMemOp(*I)); 7064 7065 Type *ValTy = getLoadStoreType(I); 7066 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7067 const Align Alignment = getLoadStoreAlignment(I); 7068 unsigned AS = getLoadStoreAddressSpace(I); 7069 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7070 if (isa<LoadInst>(I)) { 7071 return TTI.getAddressComputationCost(ValTy) + 7072 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 7073 CostKind) + 7074 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 7075 } 7076 StoreInst *SI = cast<StoreInst>(I); 7077 7078 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 7079 return TTI.getAddressComputationCost(ValTy) + 7080 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 
7081 CostKind) + 7082 (isLoopInvariantStoreValue 7083 ? 0 7084 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy, 7085 VF.getKnownMinValue() - 1)); 7086 } 7087 7088 InstructionCost 7089 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I, 7090 ElementCount VF) { 7091 Type *ValTy = getLoadStoreType(I); 7092 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7093 const Align Alignment = getLoadStoreAlignment(I); 7094 const Value *Ptr = getLoadStorePointerOperand(I); 7095 7096 return TTI.getAddressComputationCost(VectorTy) + 7097 TTI.getGatherScatterOpCost( 7098 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment, 7099 TargetTransformInfo::TCK_RecipThroughput, I); 7100 } 7101 7102 InstructionCost 7103 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I, 7104 ElementCount VF) { 7105 // TODO: Once we have support for interleaving with scalable vectors 7106 // we can calculate the cost properly here. 7107 if (VF.isScalable()) 7108 return InstructionCost::getInvalid(); 7109 7110 Type *ValTy = getLoadStoreType(I); 7111 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 7112 unsigned AS = getLoadStoreAddressSpace(I); 7113 7114 auto Group = getInterleavedAccessGroup(I); 7115 assert(Group && "Fail to get an interleaved access group."); 7116 7117 unsigned InterleaveFactor = Group->getFactor(); 7118 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor); 7119 7120 // Holds the indices of existing members in an interleaved load group. 7121 // An interleaved store group doesn't need this as it doesn't allow gaps. 7122 SmallVector<unsigned, 4> Indices; 7123 if (isa<LoadInst>(I)) { 7124 for (unsigned i = 0; i < InterleaveFactor; i++) 7125 if (Group->getMember(i)) 7126 Indices.push_back(i); 7127 } 7128 7129 // Calculate the cost of the whole interleaved group. 7130 bool UseMaskForGaps = 7131 Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed(); 7132 InstructionCost Cost = TTI.getInterleavedMemoryOpCost( 7133 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(), 7134 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps); 7135 7136 if (Group->isReverse()) { 7137 // TODO: Add support for reversed masked interleaved access. 7138 assert(!Legal->isMaskRequired(I) && 7139 "Reverse masked interleaved access not supported."); 7140 Cost += 7141 Group->getNumMembers() * 7142 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 7143 } 7144 return Cost; 7145 } 7146 7147 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost( 7148 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) { 7149 using namespace llvm::PatternMatch; 7150 // Early exit for no inloop reductions 7151 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty)) 7152 return None; 7153 auto *VectorTy = cast<VectorType>(Ty); 7154 7155 // We are looking for a pattern of, and finding the minimal acceptable cost: 7156 // reduce(mul(ext(A), ext(B))) or 7157 // reduce(mul(A, B)) or 7158 // reduce(ext(A)) or 7159 // reduce(A). 7160 // The basic idea is that we walk down the tree to do that, finding the root 7161 // reduction instruction in InLoopReductionImmediateChains. From there we find 7162 // the pattern of mul/ext and test the cost of the entire pattern vs the cost 7163 // of the components. If the reduction cost is lower then we return it for the 7164 // reduction instruction and 0 for the other instructions in the pattern. 
If 7165 // it is not, we return an invalid cost specifying that the original cost 7166 // method should be used. 7167 Instruction *RetI = I; 7168 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 7169 if (!RetI->hasOneUser()) 7170 return None; 7171 RetI = RetI->user_back(); 7172 } 7173 if (match(RetI, m_Mul(m_Value(), m_Value())) && 7174 RetI->user_back()->getOpcode() == Instruction::Add) { 7175 if (!RetI->hasOneUser()) 7176 return None; 7177 RetI = RetI->user_back(); 7178 } 7179 7180 // Test if the found instruction is a reduction, and if not return an invalid 7181 // cost specifying the parent to use the original cost modelling. 7182 if (!InLoopReductionImmediateChains.count(RetI)) 7183 return None; 7184 7185 // Find the reduction this chain is a part of and calculate the basic cost of 7186 // the reduction on its own. 7187 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 7188 Instruction *ReductionPhi = LastChain; 7189 while (!isa<PHINode>(ReductionPhi)) 7190 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 7191 7192 const RecurrenceDescriptor &RdxDesc = 7193 Legal->getReductionVars()[cast<PHINode>(ReductionPhi)]; 7194 7195 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 7196 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 7197 7198 // If we're using ordered reductions then we can just return the base cost 7199 // here, since getArithmeticReductionCost calculates the full ordered 7200 // reduction cost when FP reassociation is not allowed. 7201 if (useOrderedReductions(RdxDesc)) 7202 return BaseCost; 7203 7204 // Get the operand that was not the reduction chain and match it to one of the 7205 // patterns, returning the better cost if it is found. 7206 Instruction *RedOp = RetI->getOperand(1) == LastChain 7207 ? dyn_cast<Instruction>(RetI->getOperand(0)) 7208 : dyn_cast<Instruction>(RetI->getOperand(1)); 7209 7210 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 7211 7212 Instruction *Op0, *Op1; 7213 if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 7214 !TheLoop->isLoopInvariant(RedOp)) { 7215 // Matched reduce(ext(A)) 7216 bool IsUnsigned = isa<ZExtInst>(RedOp); 7217 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 7218 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7219 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7220 CostKind); 7221 7222 InstructionCost ExtCost = 7223 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 7224 TTI::CastContextHint::None, CostKind, RedOp); 7225 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 7226 return I == RetI ?
RedCost : 0; 7227 } else if (RedOp && 7228 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 7229 if (match(Op0, m_ZExtOrSExt(m_Value())) && 7230 Op0->getOpcode() == Op1->getOpcode() && 7231 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 7232 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 7233 bool IsUnsigned = isa<ZExtInst>(Op0); 7234 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 7235 // Matched reduce(mul(ext, ext)) 7236 InstructionCost ExtCost = 7237 TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType, 7238 TTI::CastContextHint::None, CostKind, Op0); 7239 InstructionCost MulCost = 7240 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7241 7242 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7243 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 7244 CostKind); 7245 7246 if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost) 7247 return I == RetI ? RedCost : 0; 7248 } else { 7249 // Matched reduce(mul()) 7250 InstructionCost MulCost = 7251 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7252 7253 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 7254 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 7255 CostKind); 7256 7257 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 7258 return I == RetI ? RedCost : 0; 7259 } 7260 } 7261 7262 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 7263 } 7264 7265 InstructionCost 7266 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 7267 ElementCount VF) { 7268 // Calculate scalar cost only. Vectorization cost should be ready at this 7269 // moment. 7270 if (VF.isScalar()) { 7271 Type *ValTy = getLoadStoreType(I); 7272 const Align Alignment = getLoadStoreAlignment(I); 7273 unsigned AS = getLoadStoreAddressSpace(I); 7274 7275 return TTI.getAddressComputationCost(ValTy) + 7276 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 7277 TTI::TCK_RecipThroughput, I); 7278 } 7279 return getWideningCost(I, VF); 7280 } 7281 7282 LoopVectorizationCostModel::VectorizationCostTy 7283 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 7284 ElementCount VF) { 7285 // If we know that this instruction will remain uniform, check the cost of 7286 // the scalar version. 7287 if (isUniformAfterVectorization(I, VF)) 7288 VF = ElementCount::getFixed(1); 7289 7290 if (VF.isVector() && isProfitableToScalarize(I, VF)) 7291 return VectorizationCostTy(InstsToScalarize[VF][I], false); 7292 7293 // Forced scalars do not have any scalarization overhead. 7294 auto ForcedScalar = ForcedScalars.find(VF); 7295 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 7296 auto InstSet = ForcedScalar->second; 7297 if (InstSet.count(I)) 7298 return VectorizationCostTy( 7299 (getInstructionCost(I, ElementCount::getFixed(1)).first * 7300 VF.getKnownMinValue()), 7301 false); 7302 } 7303 7304 Type *VectorTy; 7305 InstructionCost C = getInstructionCost(I, VF, VectorTy); 7306 7307 bool TypeNotScalarized = 7308 VF.isVector() && VectorTy->isVectorTy() && 7309 TTI.getNumberOfParts(VectorTy) < VF.getKnownMinValue(); 7310 return VectorizationCostTy(C, TypeNotScalarized); 7311 } 7312 7313 InstructionCost 7314 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 7315 ElementCount VF) const { 7316 7317 // There is no mechanism yet to create a scalable scalarization loop, 7318 // so this is currently Invalid. 
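// (For a fixed VF the overhead modelled below is, roughly, VF insertelements
// to build the result vector plus VF extractelements per operand that needs
// extracting; this is an illustrative summary only, the exact values come
// from TTI::getScalarizationOverhead and getOperandsScalarizationOverhead.)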
7319 if (VF.isScalable()) 7320 return InstructionCost::getInvalid(); 7321 7322 if (VF.isScalar()) 7323 return 0; 7324 7325 InstructionCost Cost = 0; 7326 Type *RetTy = ToVectorTy(I->getType(), VF); 7327 if (!RetTy->isVoidTy() && 7328 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 7329 Cost += TTI.getScalarizationOverhead( 7330 cast<VectorType>(RetTy), APInt::getAllOnesValue(VF.getKnownMinValue()), 7331 true, false); 7332 7333 // Some targets keep addresses scalar. 7334 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 7335 return Cost; 7336 7337 // Some targets support efficient element stores. 7338 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 7339 return Cost; 7340 7341 // Collect operands to consider. 7342 CallInst *CI = dyn_cast<CallInst>(I); 7343 Instruction::op_range Ops = CI ? CI->arg_operands() : I->operands(); 7344 7345 // Skip operands that do not require extraction/scalarization and do not incur 7346 // any overhead. 7347 SmallVector<Type *> Tys; 7348 for (auto *V : filterExtractingOperands(Ops, VF)) 7349 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 7350 return Cost + TTI.getOperandsScalarizationOverhead( 7351 filterExtractingOperands(Ops, VF), Tys); 7352 } 7353 7354 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 7355 if (VF.isScalar()) 7356 return; 7357 NumPredStores = 0; 7358 for (BasicBlock *BB : TheLoop->blocks()) { 7359 // For each instruction in the old loop. 7360 for (Instruction &I : *BB) { 7361 Value *Ptr = getLoadStorePointerOperand(&I); 7362 if (!Ptr) 7363 continue; 7364 7365 // TODO: We should generate better code and update the cost model for 7366 // predicated uniform stores. Today they are treated as any other 7367 // predicated store (see added test cases in 7368 // invariant-store-vectorization.ll). 7369 if (isa<StoreInst>(&I) && isScalarWithPredication(&I)) 7370 NumPredStores++; 7371 7372 if (Legal->isUniformMemOp(I)) { 7373 // TODO: Avoid replicating loads and stores instead of 7374 // relying on instcombine to remove them. 7375 // Load: Scalar load + broadcast 7376 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 7377 InstructionCost Cost; 7378 if (isa<StoreInst>(&I) && VF.isScalable() && 7379 isLegalGatherOrScatter(&I)) { 7380 Cost = getGatherScatterCost(&I, VF); 7381 setWideningDecision(&I, VF, CM_GatherScatter, Cost); 7382 } else { 7383 assert((isa<LoadInst>(&I) || !VF.isScalable()) && 7384 "Cannot yet scalarize uniform stores"); 7385 Cost = getUniformMemOpCost(&I, VF); 7386 setWideningDecision(&I, VF, CM_Scalarize, Cost); 7387 } 7388 continue; 7389 } 7390 7391 // We assume that widening is the best solution when possible. 7392 if (memoryInstructionCanBeWidened(&I, VF)) { 7393 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 7394 int ConsecutiveStride = 7395 Legal->isConsecutivePtr(getLoadStorePointerOperand(&I)); 7396 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 7397 "Expected consecutive stride."); 7398 InstWidening Decision = 7399 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 7400 setWideningDecision(&I, VF, Decision, Cost); 7401 continue; 7402 } 7403 7404 // Choose between Interleaving, Gather/Scatter or Scalarization. 
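// Illustrative example (costs hypothetical): with InterleaveCost = 8,
// GatherScatterCost = 12 and ScalarizationCost = 20 the comparisons below
// select CM_Interleave; an invalid cost (unsupported strategy) compares as
// more expensive than any valid one, so it never wins.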
7405 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 7406 unsigned NumAccesses = 1; 7407 if (isAccessInterleaved(&I)) { 7408 auto Group = getInterleavedAccessGroup(&I); 7409 assert(Group && "Fail to get an interleaved access group."); 7410 7411 // Make one decision for the whole group. 7412 if (getWideningDecision(&I, VF) != CM_Unknown) 7413 continue; 7414 7415 NumAccesses = Group->getNumMembers(); 7416 if (interleavedAccessCanBeWidened(&I, VF)) 7417 InterleaveCost = getInterleaveGroupCost(&I, VF); 7418 } 7419 7420 InstructionCost GatherScatterCost = 7421 isLegalGatherOrScatter(&I) 7422 ? getGatherScatterCost(&I, VF) * NumAccesses 7423 : InstructionCost::getInvalid(); 7424 7425 InstructionCost ScalarizationCost = 7426 getMemInstScalarizationCost(&I, VF) * NumAccesses; 7427 7428 // Choose better solution for the current VF, 7429 // write down this decision and use it during vectorization. 7430 InstructionCost Cost; 7431 InstWidening Decision; 7432 if (InterleaveCost <= GatherScatterCost && 7433 InterleaveCost < ScalarizationCost) { 7434 Decision = CM_Interleave; 7435 Cost = InterleaveCost; 7436 } else if (GatherScatterCost < ScalarizationCost) { 7437 Decision = CM_GatherScatter; 7438 Cost = GatherScatterCost; 7439 } else { 7440 Decision = CM_Scalarize; 7441 Cost = ScalarizationCost; 7442 } 7443 // If the instructions belongs to an interleave group, the whole group 7444 // receives the same decision. The whole group receives the cost, but 7445 // the cost will actually be assigned to one instruction. 7446 if (auto Group = getInterleavedAccessGroup(&I)) 7447 setWideningDecision(Group, VF, Decision, Cost); 7448 else 7449 setWideningDecision(&I, VF, Decision, Cost); 7450 } 7451 } 7452 7453 // Make sure that any load of address and any other address computation 7454 // remains scalar unless there is gather/scatter support. This avoids 7455 // inevitable extracts into address registers, and also has the benefit of 7456 // activating LSR more, since that pass can't optimize vectorized 7457 // addresses. 7458 if (TTI.prefersVectorizedAddressing()) 7459 return; 7460 7461 // Start with all scalar pointer uses. 7462 SmallPtrSet<Instruction *, 8> AddrDefs; 7463 for (BasicBlock *BB : TheLoop->blocks()) 7464 for (Instruction &I : *BB) { 7465 Instruction *PtrDef = 7466 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 7467 if (PtrDef && TheLoop->contains(PtrDef) && 7468 getWideningDecision(&I, VF) != CM_GatherScatter) 7469 AddrDefs.insert(PtrDef); 7470 } 7471 7472 // Add all instructions used to generate the addresses. 7473 SmallVector<Instruction *, 4> Worklist; 7474 append_range(Worklist, AddrDefs); 7475 while (!Worklist.empty()) { 7476 Instruction *I = Worklist.pop_back_val(); 7477 for (auto &Op : I->operands()) 7478 if (auto *InstOp = dyn_cast<Instruction>(Op)) 7479 if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) && 7480 AddrDefs.insert(InstOp).second) 7481 Worklist.push_back(InstOp); 7482 } 7483 7484 for (auto *I : AddrDefs) { 7485 if (isa<LoadInst>(I)) { 7486 // Setting the desired widening decision should ideally be handled in 7487 // by cost functions, but since this involves the task of finding out 7488 // if the loaded register is involved in an address computation, it is 7489 // instead changed here when we know this is the case. 7490 InstWidening Decision = getWideningDecision(I, VF); 7491 if (Decision == CM_Widen || Decision == CM_Widen_Reverse) 7492 // Scalarize a widened load of address. 
7493 setWideningDecision( 7494 I, VF, CM_Scalarize, 7495 (VF.getKnownMinValue() * 7496 getMemoryInstructionCost(I, ElementCount::getFixed(1)))); 7497 else if (auto Group = getInterleavedAccessGroup(I)) { 7498 // Scalarize an interleave group of address loads. 7499 for (unsigned I = 0; I < Group->getFactor(); ++I) { 7500 if (Instruction *Member = Group->getMember(I)) 7501 setWideningDecision( 7502 Member, VF, CM_Scalarize, 7503 (VF.getKnownMinValue() * 7504 getMemoryInstructionCost(Member, ElementCount::getFixed(1)))); 7505 } 7506 } 7507 } else 7508 // Make sure I gets scalarized and a cost estimate without 7509 // scalarization overhead. 7510 ForcedScalars[VF].insert(I); 7511 } 7512 } 7513 7514 InstructionCost 7515 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF, 7516 Type *&VectorTy) { 7517 Type *RetTy = I->getType(); 7518 if (canTruncateToMinimalBitwidth(I, VF)) 7519 RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]); 7520 auto SE = PSE.getSE(); 7521 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 7522 7523 auto hasSingleCopyAfterVectorization = [this](Instruction *I, 7524 ElementCount VF) -> bool { 7525 if (VF.isScalar()) 7526 return true; 7527 7528 auto Scalarized = InstsToScalarize.find(VF); 7529 assert(Scalarized != InstsToScalarize.end() && 7530 "VF not yet analyzed for scalarization profitability"); 7531 return !Scalarized->second.count(I) && 7532 llvm::all_of(I->users(), [&](User *U) { 7533 auto *UI = cast<Instruction>(U); 7534 return !Scalarized->second.count(UI); 7535 }); 7536 }; 7537 (void) hasSingleCopyAfterVectorization; 7538 7539 if (isScalarAfterVectorization(I, VF)) { 7540 // With the exception of GEPs and PHIs, after scalarization there should 7541 // only be one copy of the instruction generated in the loop. This is 7542 // because the VF is either 1, or any instructions that need scalarizing 7543 // have already been dealt with by the the time we get here. As a result, 7544 // it means we don't have to multiply the instruction cost by VF. 7545 assert(I->getOpcode() == Instruction::GetElementPtr || 7546 I->getOpcode() == Instruction::PHI || 7547 (I->getOpcode() == Instruction::BitCast && 7548 I->getType()->isPointerTy()) || 7549 hasSingleCopyAfterVectorization(I, VF)); 7550 VectorTy = RetTy; 7551 } else 7552 VectorTy = ToVectorTy(RetTy, VF); 7553 7554 // TODO: We need to estimate the cost of intrinsic calls. 7555 switch (I->getOpcode()) { 7556 case Instruction::GetElementPtr: 7557 // We mark this instruction as zero-cost because the cost of GEPs in 7558 // vectorized code depends on whether the corresponding memory instruction 7559 // is scalarized or not. Therefore, we handle GEPs with the memory 7560 // instruction cost. 7561 return 0; 7562 case Instruction::Br: { 7563 // In cases of scalarized and predicated instructions, there will be VF 7564 // predicated blocks in the vectorized loop. Each branch around these 7565 // blocks requires also an extract of its vector compare i1 element. 7566 bool ScalarPredicatedBB = false; 7567 BranchInst *BI = cast<BranchInst>(I); 7568 if (VF.isVector() && BI->isConditional() && 7569 (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) || 7570 PredicatedBBsAfterVectorization.count(BI->getSuccessor(1)))) 7571 ScalarPredicatedBB = true; 7572 7573 if (ScalarPredicatedBB) { 7574 // Not possible to scalarize scalable vector with predicated instructions. 
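// For a fixed VF (scalable VFs are rejected just below) the returned cost is
// one i1 extract plus one branch per vector lane, e.g. VF = 4 gives
// 4 extracts + 4 branches (illustrative; the extract portion comes from
// TTI::getScalarizationOverhead).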
7575 if (VF.isScalable()) 7576 return InstructionCost::getInvalid(); 7577 // Return cost for branches around scalarized and predicated blocks. 7578 auto *Vec_i1Ty = 7579 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 7580 return ( 7581 TTI.getScalarizationOverhead( 7582 Vec_i1Ty, APInt::getAllOnesValue(VF.getFixedValue()), false, 7583 true) + 7584 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 7585 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 7586 // The back-edge branch will remain, as will all scalar branches. 7587 return TTI.getCFInstrCost(Instruction::Br, CostKind); 7588 else 7589 // This branch will be eliminated by if-conversion. 7590 return 0; 7591 // Note: We currently assume zero cost for an unconditional branch inside 7592 // a predicated block since it will become a fall-through, although we 7593 // may decide in the future to call TTI for all branches. 7594 } 7595 case Instruction::PHI: { 7596 auto *Phi = cast<PHINode>(I); 7597 7598 // First-order recurrences are replaced by vector shuffles inside the loop. 7599 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7600 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7601 return TTI.getShuffleCost( 7602 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7603 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7604 7605 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7606 // converted into select instructions. We require N - 1 selects per phi 7607 // node, where N is the number of incoming values. 7608 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7609 return (Phi->getNumIncomingValues() - 1) * 7610 TTI.getCmpSelInstrCost( 7611 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7612 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7613 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7614 7615 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7616 } 7617 case Instruction::UDiv: 7618 case Instruction::SDiv: 7619 case Instruction::URem: 7620 case Instruction::SRem: 7621 // If we have a predicated instruction, it may not be executed for each 7622 // vector lane. Get the scalarization cost and scale this amount by the 7623 // probability of executing the predicated block. If the instruction is not 7624 // predicated, we fall through to the next case. 7625 if (VF.isVector() && isScalarWithPredication(I)) { 7626 InstructionCost Cost = 0; 7627 7628 // These instructions have a non-void type, so account for the phi nodes 7629 // that we will create. This cost is likely to be zero. The phi node 7630 // cost, if any, should be scaled by the block probability because it 7631 // models a copy at the end of each predicated block. 7632 Cost += VF.getKnownMinValue() * 7633 TTI.getCFInstrCost(Instruction::PHI, CostKind); 7634 7635 // The cost of the non-predicated instruction. 7636 Cost += VF.getKnownMinValue() * 7637 TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind); 7638 7639 // The cost of insertelement and extractelement instructions needed for 7640 // scalarization. 7641 Cost += getScalarizationOverhead(I, VF); 7642 7643 // Scale the cost by the probability of executing the predicated blocks. 7644 // This assumes the predicated block for each vector lane is equally 7645 // likely. 
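// E.g. under the usual assumption that a predicated block executes about
// half the time, getReciprocalPredBlockProb() is 2 and the accumulated cost
// is halved (illustrative).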
7646 return Cost / getReciprocalPredBlockProb(); 7647 } 7648 LLVM_FALLTHROUGH; 7649 case Instruction::Add: 7650 case Instruction::FAdd: 7651 case Instruction::Sub: 7652 case Instruction::FSub: 7653 case Instruction::Mul: 7654 case Instruction::FMul: 7655 case Instruction::FDiv: 7656 case Instruction::FRem: 7657 case Instruction::Shl: 7658 case Instruction::LShr: 7659 case Instruction::AShr: 7660 case Instruction::And: 7661 case Instruction::Or: 7662 case Instruction::Xor: { 7663 // Since we will replace the stride by 1 the multiplication should go away. 7664 if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal)) 7665 return 0; 7666 7667 // Detect reduction patterns 7668 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7669 return *RedCost; 7670 7671 // Certain instructions can be cheaper to vectorize if they have a constant 7672 // second vector operand. One example of this are shifts on x86. 7673 Value *Op2 = I->getOperand(1); 7674 TargetTransformInfo::OperandValueProperties Op2VP; 7675 TargetTransformInfo::OperandValueKind Op2VK = 7676 TTI.getOperandInfo(Op2, Op2VP); 7677 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7678 Op2VK = TargetTransformInfo::OK_UniformValue; 7679 7680 SmallVector<const Value *, 4> Operands(I->operand_values()); 7681 return TTI.getArithmeticInstrCost( 7682 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7683 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7684 } 7685 case Instruction::FNeg: { 7686 return TTI.getArithmeticInstrCost( 7687 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7688 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7689 TargetTransformInfo::OP_None, I->getOperand(0), I); 7690 } 7691 case Instruction::Select: { 7692 SelectInst *SI = cast<SelectInst>(I); 7693 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7694 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7695 7696 const Value *Op0, *Op1; 7697 using namespace llvm::PatternMatch; 7698 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7699 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7700 // select x, y, false --> x & y 7701 // select x, true, y --> x | y 7702 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7703 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7704 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7705 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7706 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7707 Op1->getType()->getScalarSizeInBits() == 1); 7708 7709 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7710 return TTI.getArithmeticInstrCost( 7711 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7712 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7713 } 7714 7715 Type *CondTy = SI->getCondition()->getType(); 7716 if (!ScalarCond) 7717 CondTy = VectorType::get(CondTy, VF); 7718 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, 7719 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7720 } 7721 case Instruction::ICmp: 7722 case Instruction::FCmp: { 7723 Type *ValTy = I->getOperand(0)->getType(); 7724 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7725 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7726 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7727 VectorTy = ToVectorTy(ValTy, VF); 7728 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7729 CmpInst::BAD_ICMP_PREDICATE, CostKind, I); 7730 } 7731 case Instruction::Store: 7732 case Instruction::Load: { 7733 ElementCount Width = VF; 7734 if (Width.isVector()) { 7735 InstWidening Decision = getWideningDecision(I, Width); 7736 assert(Decision != CM_Unknown && 7737 "CM decision should be taken at this point"); 7738 if (Decision == CM_Scalarize) 7739 Width = ElementCount::getFixed(1); 7740 } 7741 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7742 return getMemoryInstructionCost(I, VF); 7743 } 7744 case Instruction::BitCast: 7745 if (I->getType()->isPointerTy()) 7746 return 0; 7747 LLVM_FALLTHROUGH; 7748 case Instruction::ZExt: 7749 case Instruction::SExt: 7750 case Instruction::FPToUI: 7751 case Instruction::FPToSI: 7752 case Instruction::FPExt: 7753 case Instruction::PtrToInt: 7754 case Instruction::IntToPtr: 7755 case Instruction::SIToFP: 7756 case Instruction::UIToFP: 7757 case Instruction::Trunc: 7758 case Instruction::FPTrunc: { 7759 // Computes the CastContextHint from a Load/Store instruction. 7760 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7761 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7762 "Expected a load or a store!"); 7763 7764 if (VF.isScalar() || !TheLoop->contains(I)) 7765 return TTI::CastContextHint::Normal; 7766 7767 switch (getWideningDecision(I, VF)) { 7768 case LoopVectorizationCostModel::CM_GatherScatter: 7769 return TTI::CastContextHint::GatherScatter; 7770 case LoopVectorizationCostModel::CM_Interleave: 7771 return TTI::CastContextHint::Interleave; 7772 case LoopVectorizationCostModel::CM_Scalarize: 7773 case LoopVectorizationCostModel::CM_Widen: 7774 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7775 : TTI::CastContextHint::Normal; 7776 case LoopVectorizationCostModel::CM_Widen_Reverse: 7777 return TTI::CastContextHint::Reversed; 7778 case LoopVectorizationCostModel::CM_Unknown: 7779 llvm_unreachable("Instr did not go through cost modelling?"); 7780 } 7781 7782 llvm_unreachable("Unhandled case!"); 7783 }; 7784 7785 unsigned Opcode = I->getOpcode(); 7786 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7787 // For Trunc, the context is the only user, which must be a StoreInst. 7788 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7789 if (I->hasOneUse()) 7790 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7791 CCH = ComputeCCH(Store); 7792 } 7793 // For Z/Sext, the context is the operand, which must be a LoadInst. 
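// (Illustrative: a zext fed by a load whose widening decision was
// CM_Widen_Reverse reports TTI::CastContextHint::Reversed via ComputeCCH
// above.)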
7794 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7795 Opcode == Instruction::FPExt) { 7796 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7797 CCH = ComputeCCH(Load); 7798 } 7799 7800 // We optimize the truncation of induction variables having constant 7801 // integer steps. The cost of these truncations is the same as the scalar 7802 // operation. 7803 if (isOptimizableIVTruncate(I, VF)) { 7804 auto *Trunc = cast<TruncInst>(I); 7805 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7806 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7807 } 7808 7809 // Detect reduction patterns 7810 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7811 return *RedCost; 7812 7813 Type *SrcScalarTy = I->getOperand(0)->getType(); 7814 Type *SrcVecTy = 7815 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7816 if (canTruncateToMinimalBitwidth(I, VF)) { 7817 // This cast is going to be shrunk. This may remove the cast or it might 7818 // turn it into slightly different cast. For example, if MinBW == 16, 7819 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7820 // 7821 // Calculate the modified src and dest types. 7822 Type *MinVecTy = VectorTy; 7823 if (Opcode == Instruction::Trunc) { 7824 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7825 VectorTy = 7826 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7827 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7828 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7829 VectorTy = 7830 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7831 } 7832 } 7833 7834 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7835 } 7836 case Instruction::Call: { 7837 bool NeedToScalarize; 7838 CallInst *CI = cast<CallInst>(I); 7839 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7840 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7841 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7842 return std::min(CallCost, IntrinsicCost); 7843 } 7844 return CallCost; 7845 } 7846 case Instruction::ExtractValue: 7847 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7848 case Instruction::Alloca: 7849 // We cannot easily widen alloca to a scalable alloca, as 7850 // the result would need to be a vector of pointers. 7851 if (VF.isScalable()) 7852 return InstructionCost::getInvalid(); 7853 LLVM_FALLTHROUGH; 7854 default: 7855 // This opcode is unknown. Assume that it is the same as 'mul'. 7856 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7857 } // end of switch. 
7858 } 7859 7860 char LoopVectorize::ID = 0; 7861 7862 static const char lv_name[] = "Loop Vectorization"; 7863 7864 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) 7865 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 7866 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 7867 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 7868 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 7869 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 7870 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) 7871 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 7872 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 7873 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 7874 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis) 7875 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass) 7876 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 7877 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) 7878 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy) 7879 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) 7880 7881 namespace llvm { 7882 7883 Pass *createLoopVectorizePass() { return new LoopVectorize(); } 7884 7885 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced, 7886 bool VectorizeOnlyWhenForced) { 7887 return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced); 7888 } 7889 7890 } // end namespace llvm 7891 7892 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) { 7893 // Check if the pointer operand of a load or store instruction is 7894 // consecutive. 7895 if (auto *Ptr = getLoadStorePointerOperand(Inst)) 7896 return Legal->isConsecutivePtr(Ptr); 7897 return false; 7898 } 7899 7900 void LoopVectorizationCostModel::collectValuesToIgnore() { 7901 // Ignore ephemeral values. 7902 CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore); 7903 7904 // Ignore type-promoting instructions we identified during reduction 7905 // detection. 7906 for (auto &Reduction : Legal->getReductionVars()) { 7907 RecurrenceDescriptor &RedDes = Reduction.second; 7908 const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts(); 7909 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7910 } 7911 // Ignore type-casting instructions we identified during induction 7912 // detection. 7913 for (auto &Induction : Legal->getInductionVars()) { 7914 InductionDescriptor &IndDes = Induction.second; 7915 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 7916 VecValuesToIgnore.insert(Casts.begin(), Casts.end()); 7917 } 7918 } 7919 7920 void LoopVectorizationCostModel::collectInLoopReductions() { 7921 for (auto &Reduction : Legal->getReductionVars()) { 7922 PHINode *Phi = Reduction.first; 7923 RecurrenceDescriptor &RdxDesc = Reduction.second; 7924 7925 // We don't collect reductions that are type promoted (yet). 7926 if (RdxDesc.getRecurrenceType() != Phi->getType()) 7927 continue; 7928 7929 // If the target would prefer this reduction to happen "in-loop", then we 7930 // want to record it as such. 7931 unsigned Opcode = RdxDesc.getOpcode(); 7932 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7933 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7934 TargetTransformInfo::ReductionFlags())) 7935 continue; 7936 7937 // Check that we can correctly put the reductions into the loop, by 7938 // finding the chain of operations that leads from the phi to the loop 7939 // exit value. 
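// Illustrative shape of such a chain for an integer add reduction (names
// hypothetical):
//   %red = phi i32 [ 0, %preheader ], [ %red.next, %latch ]
//   %t = add i32 %red, %a
//   %red.next = add i32 %t, %b
// getReductionOpChain is expected to return {%t, %red.next} for this case.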
7940 SmallVector<Instruction *, 4> ReductionOperations = 7941 RdxDesc.getReductionOpChain(Phi, TheLoop); 7942 bool InLoop = !ReductionOperations.empty(); 7943 if (InLoop) { 7944 InLoopReductionChains[Phi] = ReductionOperations; 7945 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7946 Instruction *LastChain = Phi; 7947 for (auto *I : ReductionOperations) { 7948 InLoopReductionImmediateChains[I] = LastChain; 7949 LastChain = I; 7950 } 7951 } 7952 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7953 << " reduction for phi: " << *Phi << "\n"); 7954 } 7955 } 7956 7957 // TODO: we could return a pair of values that specify the max VF and 7958 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7959 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7960 // doesn't have a cost model that can choose which plan to execute if 7961 // more than one is generated. 7962 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7963 LoopVectorizationCostModel &CM) { 7964 unsigned WidestType; 7965 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7966 return WidestVectorRegBits / WidestType; 7967 } 7968 7969 VectorizationFactor 7970 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7971 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7972 ElementCount VF = UserVF; 7973 // Outer loop handling: They may require CFG and instruction level 7974 // transformations before even evaluating whether vectorization is profitable. 7975 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7976 // the vectorization pipeline. 7977 if (!OrigLoop->isInnermost()) { 7978 // If the user doesn't provide a vectorization factor, determine a 7979 // reasonable one. 7980 if (UserVF.isZero()) { 7981 VF = ElementCount::getFixed(determineVPlanVF( 7982 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7983 .getFixedSize(), 7984 CM)); 7985 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7986 7987 // Make sure we have a VF > 1 for stress testing. 7988 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7989 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7990 << "overriding computed VF.\n"); 7991 VF = ElementCount::getFixed(4); 7992 } 7993 } 7994 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7995 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7996 "VF needs to be a power of two"); 7997 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7998 << "VF " << VF << " to build VPlans.\n"); 7999 buildVPlans(VF, VF); 8000 8001 // For VPlan build stress testing, we bail out after VPlan construction. 8002 if (VPlanBuildStressTest) 8003 return VectorizationFactor::Disabled(); 8004 8005 return {VF, 0 /*Cost*/}; 8006 } 8007 8008 LLVM_DEBUG( 8009 dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the " 8010 "VPlan-native path.\n"); 8011 return VectorizationFactor::Disabled(); 8012 } 8013 8014 Optional<VectorizationFactor> 8015 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) { 8016 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8017 FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC); 8018 if (!MaxFactors) // Cases that should not to be vectorized nor interleaved. 8019 return None; 8020 8021 // Invalidate interleave groups if all blocks of loop will be predicated. 
8022 if (CM.blockNeedsPredication(OrigLoop->getHeader()) && 8023 !useMaskedInterleavedAccesses(*TTI)) { 8024 LLVM_DEBUG( 8025 dbgs() 8026 << "LV: Invalidate all interleaved groups due to fold-tail by masking " 8027 "which requires masked-interleaved support.\n"); 8028 if (CM.InterleaveInfo.invalidateGroups()) 8029 // Invalidating interleave groups also requires invalidating all decisions 8030 // based on them, which includes widening decisions and uniform and scalar 8031 // values. 8032 CM.invalidateCostModelingDecisions(); 8033 } 8034 8035 ElementCount MaxUserVF = 8036 UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF; 8037 bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF); 8038 if (!UserVF.isZero() && UserVFIsLegal) { 8039 assert(isPowerOf2_32(UserVF.getKnownMinValue()) && 8040 "VF needs to be a power of two"); 8041 // Collect the instructions (and their associated costs) that will be more 8042 // profitable to scalarize. 8043 if (CM.selectUserVectorizationFactor(UserVF)) { 8044 LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n"); 8045 CM.collectInLoopReductions(); 8046 buildVPlansWithVPRecipes(UserVF, UserVF); 8047 LLVM_DEBUG(printPlans(dbgs())); 8048 return {{UserVF, 0}}; 8049 } else 8050 reportVectorizationInfo("UserVF ignored because of invalid costs.", 8051 "InvalidCost", ORE, OrigLoop); 8052 } 8053 8054 // Populate the set of Vectorization Factor Candidates. 8055 ElementCountSet VFCandidates; 8056 for (auto VF = ElementCount::getFixed(1); 8057 ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2) 8058 VFCandidates.insert(VF); 8059 for (auto VF = ElementCount::getScalable(1); 8060 ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2) 8061 VFCandidates.insert(VF); 8062 8063 for (const auto &VF : VFCandidates) { 8064 // Collect Uniform and Scalar instructions after vectorization with VF. 8065 CM.collectUniformsAndScalars(VF); 8066 8067 // Collect the instructions (and their associated costs) that will be more 8068 // profitable to scalarize. 8069 if (VF.isVector()) 8070 CM.collectInstsToScalarize(VF); 8071 } 8072 8073 CM.collectInLoopReductions(); 8074 buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF); 8075 buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF); 8076 8077 LLVM_DEBUG(printPlans(dbgs())); 8078 if (!MaxFactors.hasVector()) 8079 return VectorizationFactor::Disabled(); 8080 8081 // Select the optimal vectorization factor. 8082 auto SelectedVF = CM.selectVectorizationFactor(VFCandidates); 8083 8084 // Check if it is profitable to vectorize with runtime checks. 
8085 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8086 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8087 bool PragmaThresholdReached = 8088 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8089 bool ThresholdReached = 8090 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8091 if ((ThresholdReached && !Hints.allowReordering()) || 8092 PragmaThresholdReached) { 8093 ORE->emit([&]() { 8094 return OptimizationRemarkAnalysisAliasing( 8095 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8096 OrigLoop->getHeader()) 8097 << "loop not vectorized: cannot prove it is safe to reorder " 8098 "memory operations"; 8099 }); 8100 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8101 Hints.emitRemarkWithHints(); 8102 return VectorizationFactor::Disabled(); 8103 } 8104 } 8105 return SelectedVF; 8106 } 8107 8108 void LoopVectorizationPlanner::setBestPlan(ElementCount VF, unsigned UF) { 8109 LLVM_DEBUG(dbgs() << "Setting best plan to VF=" << VF << ", UF=" << UF 8110 << '\n'); 8111 BestVF = VF; 8112 BestUF = UF; 8113 8114 erase_if(VPlans, [VF](const VPlanPtr &Plan) { 8115 return !Plan->hasVF(VF); 8116 }); 8117 assert(VPlans.size() == 1 && "Best VF has not a single VPlan."); 8118 } 8119 8120 void LoopVectorizationPlanner::executePlan(InnerLoopVectorizer &ILV, 8121 DominatorTree *DT) { 8122 // Perform the actual loop transformation. 8123 8124 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8125 assert(BestVF.hasValue() && "Vectorization Factor is missing"); 8126 assert(VPlans.size() == 1 && "Not a single VPlan to execute."); 8127 8128 VPTransformState State{ 8129 *BestVF, BestUF, LI, DT, ILV.Builder, &ILV, VPlans.front().get()}; 8130 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8131 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8132 State.CanonicalIV = ILV.Induction; 8133 8134 ILV.printDebugTracesAtStart(); 8135 8136 //===------------------------------------------------===// 8137 // 8138 // Notice: any optimization or new instruction that go 8139 // into the code below should also be implemented in 8140 // the cost-model. 8141 // 8142 //===------------------------------------------------===// 8143 8144 // 2. Copy and widen instructions from the old loop into the new loop. 8145 VPlans.front()->execute(&State); 8146 8147 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8148 // predication, updating analyses. 
8149 ILV.fixVectorizedLoop(State); 8150 8151 ILV.printDebugTracesAtEnd(); 8152 } 8153 8154 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 8155 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 8156 for (const auto &Plan : VPlans) 8157 if (PrintVPlansInDotFormat) 8158 Plan->printDOT(O); 8159 else 8160 Plan->print(O); 8161 } 8162 #endif 8163 8164 void LoopVectorizationPlanner::collectTriviallyDeadInstructions( 8165 SmallPtrSetImpl<Instruction *> &DeadInstructions) { 8166 8167 // We create new control-flow for the vectorized loop, so the original exit 8168 // condition will be dead after vectorization if it is only used by the 8169 // terminator. 8170 SmallVector<BasicBlock*> ExitingBlocks; 8171 OrigLoop->getExitingBlocks(ExitingBlocks); 8172 for (auto *BB : ExitingBlocks) { 8173 auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0)); 8174 if (!Cmp || !Cmp->hasOneUse()) 8175 continue; 8176 8177 // TODO: we should introduce a getUniqueExitingBlocks on Loop 8178 if (!DeadInstructions.insert(Cmp).second) 8179 continue; 8180 8181 // The operand of the icmp is often a dead trunc, used by IndUpdate. 8182 // TODO: can recurse through operands in general 8183 for (Value *Op : Cmp->operands()) { 8184 if (isa<TruncInst>(Op) && Op->hasOneUse()) 8185 DeadInstructions.insert(cast<Instruction>(Op)); 8186 } 8187 } 8188 8189 // We create new "steps" for induction variable updates to which the original 8190 // induction variables map. An original update instruction will be dead if 8191 // all its users except the induction variable are dead. 8192 auto *Latch = OrigLoop->getLoopLatch(); 8193 for (auto &Induction : Legal->getInductionVars()) { 8194 PHINode *Ind = Induction.first; 8195 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 8196 8197 // If the tail is to be folded by masking, the primary induction variable, 8198 // if it exists, isn't dead: it will be used for masking. Don't kill it. 8199 if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction()) 8200 continue; 8201 8202 if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 8203 return U == Ind || DeadInstructions.count(cast<Instruction>(U)); 8204 })) 8205 DeadInstructions.insert(IndUpdate); 8206 8207 // We record as "Dead" also the type-casting instructions we had identified 8208 // during induction analysis. We don't need any handling for them in the 8209 // vectorized loop because we have proven that, under a proper runtime 8210 // test guarding the vectorized loop, the value of the phi, and the casted 8211 // value of the phi, are the same. The last instruction in this casting chain 8212 // will get its scalar/vector/widened def from the scalar/vector/widened def 8213 // of the respective phi node. Any other casts in the induction def-use chain 8214 // have no other uses outside the phi update chain, and will be ignored. 8215 InductionDescriptor &IndDes = Induction.second; 8216 const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts(); 8217 DeadInstructions.insert(Casts.begin(), Casts.end()); 8218 } 8219 } 8220 8221 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; } 8222 8223 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 8224 8225 Value *InnerLoopUnroller::getStepVector(Value *Val, int StartIdx, Value *Step, 8226 Instruction::BinaryOps BinOp) { 8227 // When unrolling and the VF is 1, we only need to add a simple scalar.
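// Illustrative: for unroll part i the caller typically passes StartIdx = i
// (VF is 1 here), so this computes Val + i * Step, yielding Val, Val + Step,
// Val + 2 * Step, ... across the unrolled copies.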
8228 Type *Ty = Val->getType(); 8229 assert(!Ty->isVectorTy() && "Val must be a scalar"); 8230 8231 if (Ty->isFloatingPointTy()) { 8232 Constant *C = ConstantFP::get(Ty, (double)StartIdx); 8233 8234 // Floating-point operations inherit FMF via the builder's flags. 8235 Value *MulOp = Builder.CreateFMul(C, Step); 8236 return Builder.CreateBinOp(BinOp, Val, MulOp); 8237 } 8238 Constant *C = ConstantInt::get(Ty, StartIdx); 8239 return Builder.CreateAdd(Val, Builder.CreateMul(C, Step), "induction"); 8240 } 8241 8242 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 8243 SmallVector<Metadata *, 4> MDs; 8244 // Reserve first location for self reference to the LoopID metadata node. 8245 MDs.push_back(nullptr); 8246 bool IsUnrollMetadata = false; 8247 MDNode *LoopID = L->getLoopID(); 8248 if (LoopID) { 8249 // First find existing loop unrolling disable metadata. 8250 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 8251 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 8252 if (MD) { 8253 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 8254 IsUnrollMetadata = 8255 S && S->getString().startswith("llvm.loop.unroll.disable"); 8256 } 8257 MDs.push_back(LoopID->getOperand(i)); 8258 } 8259 } 8260 8261 if (!IsUnrollMetadata) { 8262 // Add runtime unroll disable metadata. 8263 LLVMContext &Context = L->getHeader()->getContext(); 8264 SmallVector<Metadata *, 1> DisableOperands; 8265 DisableOperands.push_back( 8266 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 8267 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 8268 MDs.push_back(DisableNode); 8269 MDNode *NewLoopID = MDNode::get(Context, MDs); 8270 // Set operand 0 to refer to the loop id itself. 8271 NewLoopID->replaceOperandWith(0, NewLoopID); 8272 L->setLoopID(NewLoopID); 8273 } 8274 } 8275 8276 //===--------------------------------------------------------------------===// 8277 // EpilogueVectorizerMainLoop 8278 //===--------------------------------------------------------------------===// 8279 8280 /// This function is partially responsible for generating the control flow 8281 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8282 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 8283 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8284 Loop *Lp = createVectorLoopSkeleton(""); 8285 8286 // Generate the code to check the minimum iteration count of the vector 8287 // epilogue (see below). 8288 EPI.EpilogueIterationCountCheck = 8289 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true); 8290 EPI.EpilogueIterationCountCheck->setName("iter.check"); 8291 8292 // Generate the code to check any assumptions that we've made for SCEV 8293 // expressions. 8294 EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader); 8295 8296 // Generate the code that checks at runtime if arrays overlap. We put the 8297 // checks into a separate block to make the more common case of few elements 8298 // faster. 8299 EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader); 8300 8301 // Generate the iteration count check for the main loop, *after* the check 8302 // for the epilogue loop, so that the path-length is shorter for the case 8303 // that goes directly through the vector epilogue. The longer-path length for 8304 // the main loop is compensated for, by the gain from vectorizing the larger 8305 // trip count. Note: the branch will get updated later on when we vectorize 8306 // the epilogue. 
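  // The resulting skeleton is roughly:
  //   iter.check:                  bypass all vector code if the trip count is
  //                                too small even for the epilogue VF * UF.
  //   vector.main.loop.iter.check: bypass only the main vector loop if the
  //                                trip count is too small for the main VF * UF.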
8307 EPI.MainLoopIterationCountCheck = 8308 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false); 8309 8310 // Generate the induction variable. 8311 OldInduction = Legal->getPrimaryInduction(); 8312 Type *IdxTy = Legal->getWidestInductionType(); 8313 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8314 Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF); 8315 Value *CountRoundDown = getOrCreateVectorTripCount(Lp); 8316 EPI.VectorTripCount = CountRoundDown; 8317 Induction = 8318 createInductionVariable(Lp, StartIdx, CountRoundDown, Step, 8319 getDebugLocFromInstOrOperands(OldInduction)); 8320 8321 // Skip induction resume value creation here because they will be created in 8322 // the second pass. If we created them here, they wouldn't be used anyway, 8323 // because the vplan in the second pass still contains the inductions from the 8324 // original loop. 8325 8326 return completeLoopSkeleton(Lp, OrigLoopID); 8327 } 8328 8329 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 8330 LLVM_DEBUG({ 8331 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 8332 << "Main Loop VF:" << EPI.MainLoopVF.getKnownMinValue() 8333 << ", Main Loop UF:" << EPI.MainLoopUF 8334 << ", Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8335 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8336 }); 8337 } 8338 8339 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 8340 DEBUG_WITH_TYPE(VerboseDebug, { 8341 dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n"; 8342 }); 8343 } 8344 8345 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck( 8346 Loop *L, BasicBlock *Bypass, bool ForEpilogue) { 8347 assert(L && "Expected valid Loop."); 8348 assert(Bypass && "Expected valid bypass basic block."); 8349 unsigned VFactor = 8350 ForEpilogue ? EPI.EpilogueVF.getKnownMinValue() : VF.getKnownMinValue(); 8351 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 8352 Value *Count = getOrCreateTripCount(L); 8353 // Reuse existing vector loop preheader for TC checks. 8354 // Note that new preheader block is generated for vector loop. 8355 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 8356 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 8357 8358 // Generate code to check if the loop's trip count is less than VF * UF of the 8359 // main vector loop. 8360 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 8361 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8362 8363 Value *CheckMinIters = Builder.CreateICmp( 8364 P, Count, ConstantInt::get(Count->getType(), VFactor * UFactor), 8365 "min.iters.check"); 8366 8367 if (!ForEpilogue) 8368 TCCheckBlock->setName("vector.main.loop.iter.check"); 8369 8370 // Create new preheader for vector loop. 8371 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 8372 DT, LI, nullptr, "vector.ph"); 8373 8374 if (ForEpilogue) { 8375 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 8376 DT->getNode(Bypass)->getIDom()) && 8377 "TC check is expected to dominate Bypass"); 8378 8379 // Update dominator for Bypass & LoopExit. 8380 DT->changeImmediateDominator(Bypass, TCCheckBlock); 8381 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8382 // For loops with multiple exits, there's no edge from the middle block 8383 // to exit blocks (as the epilogue must run) and thus no need to update 8384 // the immediate dominator of the exit blocks. 
8385 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 8386 8387 LoopBypassBlocks.push_back(TCCheckBlock); 8388 8389 // Save the trip count so we don't have to regenerate it in the 8390 // vec.epilog.iter.check. This is safe to do because the trip count 8391 // generated here dominates the vector epilog iter check. 8392 EPI.TripCount = Count; 8393 } 8394 8395 ReplaceInstWithInst( 8396 TCCheckBlock->getTerminator(), 8397 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8398 8399 return TCCheckBlock; 8400 } 8401 8402 //===--------------------------------------------------------------------===// 8403 // EpilogueVectorizerEpilogueLoop 8404 //===--------------------------------------------------------------------===// 8405 8406 /// This function is partially responsible for generating the control flow 8407 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 8408 BasicBlock * 8409 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 8410 MDNode *OrigLoopID = OrigLoop->getLoopID(); 8411 Loop *Lp = createVectorLoopSkeleton("vec.epilog."); 8412 8413 // Now, compare the remaining count and if there aren't enough iterations to 8414 // execute the vectorized epilogue skip to the scalar part. 8415 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 8416 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 8417 LoopVectorPreHeader = 8418 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 8419 LI, nullptr, "vec.epilog.ph"); 8420 emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader, 8421 VecEpilogueIterationCountCheck); 8422 8423 // Adjust the control flow taking the state info from the main loop 8424 // vectorization into account. 8425 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 8426 "expected this to be saved from the previous pass."); 8427 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 8428 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 8429 8430 DT->changeImmediateDominator(LoopVectorPreHeader, 8431 EPI.MainLoopIterationCountCheck); 8432 8433 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 8434 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8435 8436 if (EPI.SCEVSafetyCheck) 8437 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 8438 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8439 if (EPI.MemSafetyCheck) 8440 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 8441 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 8442 8443 DT->changeImmediateDominator( 8444 VecEpilogueIterationCountCheck, 8445 VecEpilogueIterationCountCheck->getSinglePredecessor()); 8446 8447 DT->changeImmediateDominator(LoopScalarPreHeader, 8448 EPI.EpilogueIterationCountCheck); 8449 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 8450 // If there is an epilogue which must run, there's no edge from the 8451 // middle block to exit blocks and thus no need to update the immediate 8452 // dominator of the exit blocks. 8453 DT->changeImmediateDominator(LoopExitBlock, 8454 EPI.EpilogueIterationCountCheck); 8455 8456 // Keep track of bypass blocks, as they feed start values to the induction 8457 // phis in the scalar loop preheader. 
8458   if (EPI.SCEVSafetyCheck)
8459     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8460   if (EPI.MemSafetyCheck)
8461     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8462   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8463
8464   // Generate a resume induction for the vector epilogue and put it in the
8465   // vector epilogue preheader.
8466   Type *IdxTy = Legal->getWidestInductionType();
8467   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8468                                          LoopVectorPreHeader->getFirstNonPHI());
8469   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8470   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8471                            EPI.MainLoopIterationCountCheck);
8472
8473   // Generate the induction variable.
8474   OldInduction = Legal->getPrimaryInduction();
8475   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8476   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8477   Value *StartIdx = EPResumeVal;
8478   Induction =
8479       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8480                               getDebugLocFromInstOrOperands(OldInduction));
8481
8482   // Generate induction resume values. These variables save the new starting
8483   // indexes for the scalar loop. They are used to test if there are any tail
8484   // iterations left once the vector loop has completed.
8485   // Note that when the vectorized epilogue is skipped due to the iteration
8486   // count check, then the resume value for the induction variable comes from
8487   // the trip count of the main vector loop, hence passing the AdditionalBypass
8488   // argument.
8489   createInductionResumeValues(Lp, CountRoundDown,
8490                               {VecEpilogueIterationCountCheck,
8491                                EPI.VectorTripCount} /* AdditionalBypass */);
8492
8493   AddRuntimeUnrollDisableMetaData(Lp);
8494   return completeLoopSkeleton(Lp, OrigLoopID);
8495 }
8496
8497 BasicBlock *
8498 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8499     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8500
8501   assert(EPI.TripCount &&
8502          "Expected trip count to have been saved in the first pass.");
8503   assert(
8504       (!isa<Instruction>(EPI.TripCount) ||
8505        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8506       "saved trip count does not dominate insertion point.");
8507   Value *TC = EPI.TripCount;
8508   IRBuilder<> Builder(Insert->getTerminator());
8509   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8510
8511   // Generate code to check if the loop's trip count is less than VF * UF of the
8512   // vector epilogue loop.
8513   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8514 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 8515 8516 Value *CheckMinIters = Builder.CreateICmp( 8517 P, Count, 8518 ConstantInt::get(Count->getType(), 8519 EPI.EpilogueVF.getKnownMinValue() * EPI.EpilogueUF), 8520 "min.epilog.iters.check"); 8521 8522 ReplaceInstWithInst( 8523 Insert->getTerminator(), 8524 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 8525 8526 LoopBypassBlocks.push_back(Insert); 8527 return Insert; 8528 } 8529 8530 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() { 8531 LLVM_DEBUG({ 8532 dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n" 8533 << "Epilogue Loop VF:" << EPI.EpilogueVF.getKnownMinValue() 8534 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 8535 }); 8536 } 8537 8538 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() { 8539 DEBUG_WITH_TYPE(VerboseDebug, { 8540 dbgs() << "final fn:\n" << *Induction->getFunction() << "\n"; 8541 }); 8542 } 8543 8544 bool LoopVectorizationPlanner::getDecisionAndClampRange( 8545 const std::function<bool(ElementCount)> &Predicate, VFRange &Range) { 8546 assert(!Range.isEmpty() && "Trying to test an empty VF range."); 8547 bool PredicateAtRangeStart = Predicate(Range.Start); 8548 8549 for (ElementCount TmpVF = Range.Start * 2; 8550 ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2) 8551 if (Predicate(TmpVF) != PredicateAtRangeStart) { 8552 Range.End = TmpVF; 8553 break; 8554 } 8555 8556 return PredicateAtRangeStart; 8557 } 8558 8559 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF, 8560 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range 8561 /// of VF's starting at a given VF and extending it as much as possible. Each 8562 /// vectorization decision can potentially shorten this sub-range during 8563 /// buildVPlan(). 8564 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 8565 ElementCount MaxVF) { 8566 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8567 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8568 VFRange SubRange = {VF, MaxVFPlusOne}; 8569 VPlans.push_back(buildVPlan(SubRange)); 8570 VF = SubRange.End; 8571 } 8572 } 8573 8574 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 8575 VPlanPtr &Plan) { 8576 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 8577 8578 // Look for cached value. 8579 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 8580 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 8581 if (ECEntryIt != EdgeMaskCache.end()) 8582 return ECEntryIt->second; 8583 8584 VPValue *SrcMask = createBlockInMask(Src, Plan); 8585 8586 // The terminator has to be a branch inst! 8587 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 8588 assert(BI && "Unexpected terminator found"); 8589 8590 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 8591 return EdgeMaskCache[Edge] = SrcMask; 8592 8593 // If source is an exiting block, we know the exit edge is dynamically dead 8594 // in the vector loop, and thus we don't need to restrict the mask. Avoid 8595 // adding uses of an otherwise potentially dead instruction. 
8596 if (OrigLoop->isLoopExiting(Src)) 8597 return EdgeMaskCache[Edge] = SrcMask; 8598 8599 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 8600 assert(EdgeMask && "No Edge Mask found for condition"); 8601 8602 if (BI->getSuccessor(0) != Dst) 8603 EdgeMask = Builder.createNot(EdgeMask); 8604 8605 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 8606 // The condition is 'SrcMask && EdgeMask', which is equivalent to 8607 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 8608 // The select version does not introduce new UB if SrcMask is false and 8609 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 8610 VPValue *False = Plan->getOrAddVPValue( 8611 ConstantInt::getFalse(BI->getCondition()->getType())); 8612 EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False); 8613 } 8614 8615 return EdgeMaskCache[Edge] = EdgeMask; 8616 } 8617 8618 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8619 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8620 8621 // Look for cached value. 8622 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8623 if (BCEntryIt != BlockMaskCache.end()) 8624 return BCEntryIt->second; 8625 8626 // All-one mask is modelled as no-mask following the convention for masked 8627 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8628 VPValue *BlockMask = nullptr; 8629 8630 if (OrigLoop->getHeader() == BB) { 8631 if (!CM.blockNeedsPredication(BB)) 8632 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8633 8634 // Create the block in mask as the first non-phi instruction in the block. 8635 VPBuilder::InsertPointGuard Guard(Builder); 8636 auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi(); 8637 Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint); 8638 8639 // Introduce the early-exit compare IV <= BTC to form header block mask. 8640 // This is used instead of IV < TC because TC may wrap, unlike BTC. 8641 // Start by constructing the desired canonical IV. 8642 VPValue *IV = nullptr; 8643 if (Legal->getPrimaryInduction()) 8644 IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction()); 8645 else { 8646 auto IVRecipe = new VPWidenCanonicalIVRecipe(); 8647 Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint); 8648 IV = IVRecipe->getVPSingleValue(); 8649 } 8650 VPValue *BTC = Plan->getOrCreateBackedgeTakenCount(); 8651 bool TailFolded = !CM.isScalarEpilogueAllowed(); 8652 8653 if (TailFolded && CM.TTI.emitGetActiveLaneMask()) { 8654 // While ActiveLaneMask is a binary op that consumes the loop tripcount 8655 // as a second argument, we only pass the IV here and extract the 8656 // tripcount from the transform state where codegen of the VP instructions 8657 // happen. 8658 BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV}); 8659 } else { 8660 BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC}); 8661 } 8662 return BlockMaskCache[BB] = BlockMask; 8663 } 8664 8665 // This is the block mask. We OR all incoming edges. 8666 for (auto *Predecessor : predecessors(BB)) { 8667 VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan); 8668 if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too. 8669 return BlockMaskCache[BB] = EdgeMask; 8670 8671 if (!BlockMask) { // BlockMask has its initialized nullptr value. 
8672 BlockMask = EdgeMask; 8673 continue; 8674 } 8675 8676 BlockMask = Builder.createOr(BlockMask, EdgeMask); 8677 } 8678 8679 return BlockMaskCache[BB] = BlockMask; 8680 } 8681 8682 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I, 8683 ArrayRef<VPValue *> Operands, 8684 VFRange &Range, 8685 VPlanPtr &Plan) { 8686 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 8687 "Must be called with either a load or store"); 8688 8689 auto willWiden = [&](ElementCount VF) -> bool { 8690 if (VF.isScalar()) 8691 return false; 8692 LoopVectorizationCostModel::InstWidening Decision = 8693 CM.getWideningDecision(I, VF); 8694 assert(Decision != LoopVectorizationCostModel::CM_Unknown && 8695 "CM decision should be taken at this point."); 8696 if (Decision == LoopVectorizationCostModel::CM_Interleave) 8697 return true; 8698 if (CM.isScalarAfterVectorization(I, VF) || 8699 CM.isProfitableToScalarize(I, VF)) 8700 return false; 8701 return Decision != LoopVectorizationCostModel::CM_Scalarize; 8702 }; 8703 8704 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8705 return nullptr; 8706 8707 VPValue *Mask = nullptr; 8708 if (Legal->isMaskRequired(I)) 8709 Mask = createBlockInMask(I->getParent(), Plan); 8710 8711 if (LoadInst *Load = dyn_cast<LoadInst>(I)) 8712 return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask); 8713 8714 StoreInst *Store = cast<StoreInst>(I); 8715 return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0], 8716 Mask); 8717 } 8718 8719 VPWidenIntOrFpInductionRecipe * 8720 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi, 8721 ArrayRef<VPValue *> Operands) const { 8722 // Check if this is an integer or fp induction. If so, build the recipe that 8723 // produces its scalar and vector values. 8724 InductionDescriptor II = Legal->getInductionVars().lookup(Phi); 8725 if (II.getKind() == InductionDescriptor::IK_IntInduction || 8726 II.getKind() == InductionDescriptor::IK_FpInduction) { 8727 assert(II.getStartValue() == 8728 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8729 const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts(); 8730 return new VPWidenIntOrFpInductionRecipe( 8731 Phi, Operands[0], Casts.empty() ? nullptr : Casts.front()); 8732 } 8733 8734 return nullptr; 8735 } 8736 8737 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8738 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, 8739 VPlan &Plan) const { 8740 // Optimize the special case where the source is a constant integer 8741 // induction variable. Notice that we can only optimize the 'trunc' case 8742 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8743 // (c) other casts depend on pointer size. 8744 8745 // Determine whether \p K is a truncation based on an induction variable that 8746 // can be optimized. 
8747 auto isOptimizableIVTruncate = 8748 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8749 return [=](ElementCount VF) -> bool { 8750 return CM.isOptimizableIVTruncate(K, VF); 8751 }; 8752 }; 8753 8754 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8755 isOptimizableIVTruncate(I), Range)) { 8756 8757 InductionDescriptor II = 8758 Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0))); 8759 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8760 return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)), 8761 Start, nullptr, I); 8762 } 8763 return nullptr; 8764 } 8765 8766 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8767 ArrayRef<VPValue *> Operands, 8768 VPlanPtr &Plan) { 8769 // If all incoming values are equal, the incoming VPValue can be used directly 8770 // instead of creating a new VPBlendRecipe. 8771 VPValue *FirstIncoming = Operands[0]; 8772 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8773 return FirstIncoming == Inc; 8774 })) { 8775 return Operands[0]; 8776 } 8777 8778 // We know that all PHIs in non-header blocks are converted into selects, so 8779 // we don't have to worry about the insertion order and we can just use the 8780 // builder. At this point we generate the predication tree. There may be 8781 // duplications since this is a simple recursive scan, but future 8782 // optimizations will clean it up. 8783 SmallVector<VPValue *, 2> OperandsWithMask; 8784 unsigned NumIncoming = Phi->getNumIncomingValues(); 8785 8786 for (unsigned In = 0; In < NumIncoming; In++) { 8787 VPValue *EdgeMask = 8788 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8789 assert((EdgeMask || NumIncoming == 1) && 8790 "Multiple predecessors with one having a full mask"); 8791 OperandsWithMask.push_back(Operands[In]); 8792 if (EdgeMask) 8793 OperandsWithMask.push_back(EdgeMask); 8794 } 8795 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8796 } 8797 8798 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8799 ArrayRef<VPValue *> Operands, 8800 VFRange &Range) const { 8801 8802 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8803 [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); }, 8804 Range); 8805 8806 if (IsPredicated) 8807 return nullptr; 8808 8809 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8810 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8811 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8812 ID == Intrinsic::pseudoprobe || 8813 ID == Intrinsic::experimental_noalias_scope_decl)) 8814 return nullptr; 8815 8816 auto willWiden = [&](ElementCount VF) -> bool { 8817 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8818 // The following case may be scalarized depending on the VF. 8819 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8820 // version of the instruction. 8821 // Is it beneficial to perform intrinsic call compared to lib call? 8822 bool NeedToScalarize = false; 8823 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8824 InstructionCost IntrinsicCost = ID ? 
CM.getVectorIntrinsicCost(CI, VF) : 0; 8825 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8826 return UseVectorIntrinsic || !NeedToScalarize; 8827 }; 8828 8829 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8830 return nullptr; 8831 8832 ArrayRef<VPValue *> Ops = Operands.take_front(CI->getNumArgOperands()); 8833 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8834 } 8835 8836 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8837 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8838 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8839 // Instruction should be widened, unless it is scalar after vectorization, 8840 // scalarization is profitable or it is predicated. 8841 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8842 return CM.isScalarAfterVectorization(I, VF) || 8843 CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I); 8844 }; 8845 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8846 Range); 8847 } 8848 8849 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8850 ArrayRef<VPValue *> Operands) const { 8851 auto IsVectorizableOpcode = [](unsigned Opcode) { 8852 switch (Opcode) { 8853 case Instruction::Add: 8854 case Instruction::And: 8855 case Instruction::AShr: 8856 case Instruction::BitCast: 8857 case Instruction::FAdd: 8858 case Instruction::FCmp: 8859 case Instruction::FDiv: 8860 case Instruction::FMul: 8861 case Instruction::FNeg: 8862 case Instruction::FPExt: 8863 case Instruction::FPToSI: 8864 case Instruction::FPToUI: 8865 case Instruction::FPTrunc: 8866 case Instruction::FRem: 8867 case Instruction::FSub: 8868 case Instruction::ICmp: 8869 case Instruction::IntToPtr: 8870 case Instruction::LShr: 8871 case Instruction::Mul: 8872 case Instruction::Or: 8873 case Instruction::PtrToInt: 8874 case Instruction::SDiv: 8875 case Instruction::Select: 8876 case Instruction::SExt: 8877 case Instruction::Shl: 8878 case Instruction::SIToFP: 8879 case Instruction::SRem: 8880 case Instruction::Sub: 8881 case Instruction::Trunc: 8882 case Instruction::UDiv: 8883 case Instruction::UIToFP: 8884 case Instruction::URem: 8885 case Instruction::Xor: 8886 case Instruction::ZExt: 8887 return true; 8888 } 8889 return false; 8890 }; 8891 8892 if (!IsVectorizableOpcode(I->getOpcode())) 8893 return nullptr; 8894 8895 // Success: widen this instruction. 
8896 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8897 } 8898 8899 void VPRecipeBuilder::fixHeaderPhis() { 8900 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8901 for (VPWidenPHIRecipe *R : PhisToFix) { 8902 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8903 VPRecipeBase *IncR = 8904 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8905 R->addOperand(IncR->getVPSingleValue()); 8906 } 8907 } 8908 8909 VPBasicBlock *VPRecipeBuilder::handleReplication( 8910 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8911 VPlanPtr &Plan) { 8912 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8913 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8914 Range); 8915 8916 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8917 [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range); 8918 8919 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8920 IsUniform, IsPredicated); 8921 setRecipe(I, Recipe); 8922 Plan->addVPValue(I, Recipe); 8923 8924 // Find if I uses a predicated instruction. If so, it will use its scalar 8925 // value. Avoid hoisting the insert-element which packs the scalar value into 8926 // a vector value, as that happens iff all users use the vector value. 8927 for (VPValue *Op : Recipe->operands()) { 8928 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8929 if (!PredR) 8930 continue; 8931 auto *RepR = 8932 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8933 assert(RepR->isPredicated() && 8934 "expected Replicate recipe to be predicated"); 8935 RepR->setAlsoPack(false); 8936 } 8937 8938 // Finalize the recipe for Instr, first if it is not predicated. 8939 if (!IsPredicated) { 8940 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8941 VPBB->appendRecipe(Recipe); 8942 return VPBB; 8943 } 8944 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8945 assert(VPBB->getSuccessors().empty() && 8946 "VPBB has successors when handling predicated replication."); 8947 // Record predicated instructions for above packing optimizations. 8948 VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan); 8949 VPBlockUtils::insertBlockAfter(Region, VPBB); 8950 auto *RegSucc = new VPBasicBlock(); 8951 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8952 return RegSucc; 8953 } 8954 8955 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr, 8956 VPRecipeBase *PredRecipe, 8957 VPlanPtr &Plan) { 8958 // Instructions marked for predication are replicated and placed under an 8959 // if-then construct to prevent side-effects. 8960 8961 // Generate recipes to compute the block mask for this region. 8962 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8963 8964 // Build the triangular if-then region. 8965 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8966 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8967 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8968 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8969 auto *PHIRecipe = Instr->getType()->isVoidTy() 8970 ? 
nullptr 8971 : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr)); 8972 if (PHIRecipe) { 8973 Plan->removeVPValueFor(Instr); 8974 Plan->addVPValue(Instr, PHIRecipe); 8975 } 8976 auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8977 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8978 VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true); 8979 8980 // Note: first set Entry as region entry and then connect successors starting 8981 // from it in order, to propagate the "parent" of each VPBasicBlock. 8982 VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry); 8983 VPBlockUtils::connectBlocks(Pred, Exit); 8984 8985 return Region; 8986 } 8987 8988 VPRecipeOrVPValueTy 8989 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8990 ArrayRef<VPValue *> Operands, 8991 VFRange &Range, VPlanPtr &Plan) { 8992 // First, check for specific widening recipes that deal with calls, memory 8993 // operations, inductions and Phi nodes. 8994 if (auto *CI = dyn_cast<CallInst>(Instr)) 8995 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8996 8997 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8998 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8999 9000 VPRecipeBase *Recipe; 9001 if (auto Phi = dyn_cast<PHINode>(Instr)) { 9002 if (Phi->getParent() != OrigLoop->getHeader()) 9003 return tryToBlend(Phi, Operands, Plan); 9004 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands))) 9005 return toVPRecipeResult(Recipe); 9006 9007 VPWidenPHIRecipe *PhiRecipe = nullptr; 9008 if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) { 9009 VPValue *StartV = Operands[0]; 9010 if (Legal->isReductionVariable(Phi)) { 9011 RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi]; 9012 assert(RdxDesc.getRecurrenceStartValue() == 9013 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 9014 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 9015 CM.isInLoopReduction(Phi), 9016 CM.useOrderedReductions(RdxDesc)); 9017 } else { 9018 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 9019 } 9020 9021 // Record the incoming value from the backedge, so we can add the incoming 9022 // value from the backedge after all recipes have been created. 9023 recordRecipeOf(cast<Instruction>( 9024 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 9025 PhisToFix.push_back(PhiRecipe); 9026 } else { 9027 // TODO: record start and backedge value for remaining pointer induction 9028 // phis. 
9029       assert(Phi->getType()->isPointerTy() &&
9030              "only pointer phis should be handled here");
9031       PhiRecipe = new VPWidenPHIRecipe(Phi);
9032     }
9033
9034     return toVPRecipeResult(PhiRecipe);
9035   }
9036
9037   if (isa<TruncInst>(Instr) &&
9038       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
9039                                                Range, *Plan)))
9040     return toVPRecipeResult(Recipe);
9041
9042   if (!shouldWiden(Instr, Range))
9043     return nullptr;
9044
9045   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
9046     return toVPRecipeResult(new VPWidenGEPRecipe(
9047         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
9048
9049   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
9050     bool InvariantCond =
9051         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
9052     return toVPRecipeResult(new VPWidenSelectRecipe(
9053         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
9054   }
9055
9056   return toVPRecipeResult(tryToWiden(Instr, Operands));
9057 }
9058
9059 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
9060                                                         ElementCount MaxVF) {
9061   assert(OrigLoop->isInnermost() && "Inner loop expected.");
9062
9063   // Collect instructions from the original loop that will become trivially dead
9064   // in the vectorized loop. We don't need to vectorize these instructions. For
9065   // example, original induction update instructions can become dead because we
9066   // separately emit induction "steps" when generating code for the new loop.
9067   // Similarly, we create a new latch condition when setting up the structure
9068   // of the new loop, so the old one can become dead.
9069   SmallPtrSet<Instruction *, 4> DeadInstructions;
9070   collectTriviallyDeadInstructions(DeadInstructions);
9071
9072   // Add assume instructions we need to drop to DeadInstructions, to prevent
9073   // them from being added to the VPlan.
9074   // TODO: We only need to drop assumes in blocks that get flattened. If the
9075   // control flow is preserved, we should keep them.
9076   auto &ConditionalAssumes = Legal->getConditionalAssumes();
9077   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
9078
9079   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
9080   // Dead instructions do not need sinking. Remove them from SinkAfter.
9081   for (Instruction *I : DeadInstructions)
9082     SinkAfter.erase(I);
9083
9084   // Cannot sink instructions after dead instructions (there won't be any
9085   // recipes for them). Instead, find the first non-dead previous instruction.
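  // For example, if a sink target happens to be a dead induction update, the
  // loop below walks backwards from it to the closest live instruction and
  // sinks after that one instead.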
9086   for (auto &P : Legal->getSinkAfter()) {
9087     Instruction *SinkTarget = P.second;
9088     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
9089     (void)FirstInst;
9090     while (DeadInstructions.contains(SinkTarget)) {
9091       assert(
9092           SinkTarget != FirstInst &&
9093           "Must find a live instruction (at least the one feeding the "
9094           "first-order recurrence PHI) before reaching beginning of the block");
9095       SinkTarget = SinkTarget->getPrevNode();
9096       assert(SinkTarget != P.first &&
9097              "sink source equals target, no sinking required");
9098     }
9099     P.second = SinkTarget;
9100   }
9101
9102   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9103   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9104     VFRange SubRange = {VF, MaxVFPlusOne};
9105     VPlans.push_back(
9106         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9107     VF = SubRange.End;
9108   }
9109 }
9110
9111 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9112     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9113     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9114
9115   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9116
9117   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9118
9119   // ---------------------------------------------------------------------------
9120   // Pre-construction: record ingredients whose recipes we'll need to further
9121   // process after constructing the initial VPlan.
9122   // ---------------------------------------------------------------------------
9123
9124   // Mark instructions we'll need to sink later and their targets as
9125   // ingredients whose recipe we'll need to record.
9126   for (auto &Entry : SinkAfter) {
9127     RecipeBuilder.recordRecipeOf(Entry.first);
9128     RecipeBuilder.recordRecipeOf(Entry.second);
9129   }
9130   for (auto &Reduction : CM.getInLoopReductionChains()) {
9131     PHINode *Phi = Reduction.first;
9132     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9133     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9134
9135     RecipeBuilder.recordRecipeOf(Phi);
9136     for (auto &R : ReductionOperations) {
9137       RecipeBuilder.recordRecipeOf(R);
9138       // For min/max reductions, where we have a pair of icmp/select, we also
9139       // need to record the ICmp recipe, so it can be removed later.
9140       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9141         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9142     }
9143   }
9144
9145   // For each interleave group which is relevant for this (possibly trimmed)
9146   // Range, add it to the set of groups to be later applied to the VPlan and add
9147   // placeholders for its members' Recipes which we'll be replacing with a
9148   // single VPInterleaveRecipe.
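  // For example, loads of a[2*i] and a[2*i+1] forming a factor-2 group are
  // later emitted as a single wide load plus shuffles rather than two strided
  // loads.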
9149   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9150     auto applyIG = [IG, this](ElementCount VF) -> bool {
9151       return (VF.isVector() && // Query is illegal for VF == 1
9152               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9153                   LoopVectorizationCostModel::CM_Interleave);
9154     };
9155     if (!getDecisionAndClampRange(applyIG, Range))
9156       continue;
9157     InterleaveGroups.insert(IG);
9158     for (unsigned i = 0; i < IG->getFactor(); i++)
9159       if (Instruction *Member = IG->getMember(i))
9160         RecipeBuilder.recordRecipeOf(Member);
9161   }
9162
9163   // ---------------------------------------------------------------------------
9164   // Build initial VPlan: Scan the body of the loop in a topological order to
9165   // visit each basic block after having visited its predecessor basic blocks.
9166   // ---------------------------------------------------------------------------
9167
9168   // Create a dummy pre-entry VPBasicBlock to start building the VPlan.
9169   auto Plan = std::make_unique<VPlan>();
9170   VPBasicBlock *VPBB = new VPBasicBlock("Pre-Entry");
9171   Plan->setEntry(VPBB);
9172
9173   // Scan the body of the loop in a topological order to visit each basic block
9174   // after having visited its predecessor basic blocks.
9175   LoopBlocksDFS DFS(OrigLoop);
9176   DFS.perform(LI);
9177
9178   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9179     // Relevant instructions from basic block BB will be grouped into VPRecipe
9180     // ingredients and fill a new VPBasicBlock.
9181     unsigned VPBBsForBB = 0;
9182     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9183     VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9184     VPBB = FirstVPBBForBB;
9185     Builder.setInsertPoint(VPBB);
9186
9187     // Introduce each ingredient into VPlan.
9188     // TODO: Model and preserve debug intrinsics in VPlan.
9189     for (Instruction &I : BB->instructionsWithoutDebug()) {
9190       Instruction *Instr = &I;
9191
9192       // First filter out irrelevant instructions, to ensure no recipes are
9193       // built for them.
9194       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9195         continue;
9196
9197       SmallVector<VPValue *, 4> Operands;
9198       auto *Phi = dyn_cast<PHINode>(Instr);
9199       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9200         Operands.push_back(Plan->getOrAddVPValue(
9201             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9202       } else {
9203         auto OpRange = Plan->mapToVPValues(Instr->operands());
9204         Operands = {OpRange.begin(), OpRange.end()};
9205       }
9206       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9207               Instr, Operands, Range, Plan)) {
9208         // If Instr can be simplified to an existing VPValue, use it.
9209         if (RecipeOrValue.is<VPValue *>()) {
9210           auto *VPV = RecipeOrValue.get<VPValue *>();
9211           Plan->addVPValue(Instr, VPV);
9212           // If the re-used value is a recipe, register the recipe for the
9213           // instruction, in case the recipe for Instr needs to be recorded.
9214           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9215             RecipeBuilder.setRecipe(Instr, R);
9216           continue;
9217         }
9218         // Otherwise, add the new recipe.
9219         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9220         for (auto *Def : Recipe->definedValues()) {
9221           auto *UV = Def->getUnderlyingValue();
9222           Plan->addVPValue(UV, Def);
9223         }
9224
9225         RecipeBuilder.setRecipe(Instr, Recipe);
9226         VPBB->appendRecipe(Recipe);
9227         continue;
9228       }
9229
9230       // Otherwise, if all widening options failed, Instruction is to be
9231       // replicated. This may create a successor for VPBB.
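      // (If the replicated instruction is predicated, handleReplication wraps
      // the recipe in a "pred.*" replicate region and returns a fresh continue
      // block, which becomes the new insertion point below.)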
9232 VPBasicBlock *NextVPBB = 9233 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 9234 if (NextVPBB != VPBB) { 9235 VPBB = NextVPBB; 9236 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 9237 : ""); 9238 } 9239 } 9240 } 9241 9242 RecipeBuilder.fixHeaderPhis(); 9243 9244 // Discard empty dummy pre-entry VPBasicBlock. Note that other VPBasicBlocks 9245 // may also be empty, such as the last one VPBB, reflecting original 9246 // basic-blocks with no recipes. 9247 VPBasicBlock *PreEntry = cast<VPBasicBlock>(Plan->getEntry()); 9248 assert(PreEntry->empty() && "Expecting empty pre-entry block."); 9249 VPBlockBase *Entry = Plan->setEntry(PreEntry->getSingleSuccessor()); 9250 VPBlockUtils::disconnectBlocks(PreEntry, Entry); 9251 delete PreEntry; 9252 9253 // --------------------------------------------------------------------------- 9254 // Transform initial VPlan: Apply previously taken decisions, in order, to 9255 // bring the VPlan to its final state. 9256 // --------------------------------------------------------------------------- 9257 9258 // Apply Sink-After legal constraints. 9259 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 9260 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 9261 if (Region && Region->isReplicator()) { 9262 assert(Region->getNumSuccessors() == 1 && 9263 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 9264 assert(R->getParent()->size() == 1 && 9265 "A recipe in an original replicator region must be the only " 9266 "recipe in its block"); 9267 return Region; 9268 } 9269 return nullptr; 9270 }; 9271 for (auto &Entry : SinkAfter) { 9272 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 9273 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 9274 9275 auto *TargetRegion = GetReplicateRegion(Target); 9276 auto *SinkRegion = GetReplicateRegion(Sink); 9277 if (!SinkRegion) { 9278 // If the sink source is not a replicate region, sink the recipe directly. 9279 if (TargetRegion) { 9280 // The target is in a replication region, make sure to move Sink to 9281 // the block after it, not into the replication region itself. 9282 VPBasicBlock *NextBlock = 9283 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 9284 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 9285 } else 9286 Sink->moveAfter(Target); 9287 continue; 9288 } 9289 9290 // The sink source is in a replicate region. Unhook the region from the CFG. 9291 auto *SinkPred = SinkRegion->getSinglePredecessor(); 9292 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 9293 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 9294 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 9295 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 9296 9297 if (TargetRegion) { 9298 // The target recipe is also in a replicate region, move the sink region 9299 // after the target region. 9300 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 9301 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 9302 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 9303 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 9304 } else { 9305 // The sink source is in a replicate region, we need to move the whole 9306 // replicate region, which should only contain a single recipe in the 9307 // main block. 
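      // Roughly: split the target's block right after the target recipe, then
      // splice the sink region in between the two halves of the split.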
9308 auto *SplitBlock = 9309 Target->getParent()->splitAt(std::next(Target->getIterator())); 9310 9311 auto *SplitPred = SplitBlock->getSinglePredecessor(); 9312 9313 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 9314 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 9315 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 9316 if (VPBB == SplitPred) 9317 VPBB = SplitBlock; 9318 } 9319 } 9320 9321 // Introduce a recipe to combine the incoming and previous values of a 9322 // first-order recurrence. 9323 for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) { 9324 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 9325 if (!RecurPhi) 9326 continue; 9327 9328 auto *RecurSplice = cast<VPInstruction>( 9329 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 9330 {RecurPhi, RecurPhi->getBackedgeValue()})); 9331 9332 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 9333 if (auto *Region = GetReplicateRegion(PrevRecipe)) { 9334 VPBasicBlock *Succ = cast<VPBasicBlock>(Region->getSingleSuccessor()); 9335 RecurSplice->moveBefore(*Succ, Succ->getFirstNonPhi()); 9336 } else 9337 RecurSplice->moveAfter(PrevRecipe); 9338 RecurPhi->replaceAllUsesWith(RecurSplice); 9339 // Set the first operand of RecurSplice to RecurPhi again, after replacing 9340 // all users. 9341 RecurSplice->setOperand(0, RecurPhi); 9342 } 9343 9344 // Interleave memory: for each Interleave Group we marked earlier as relevant 9345 // for this VPlan, replace the Recipes widening its memory instructions with a 9346 // single VPInterleaveRecipe at its insertion point. 9347 for (auto IG : InterleaveGroups) { 9348 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 9349 RecipeBuilder.getRecipe(IG->getInsertPos())); 9350 SmallVector<VPValue *, 4> StoredValues; 9351 for (unsigned i = 0; i < IG->getFactor(); ++i) 9352 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 9353 auto *StoreR = 9354 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 9355 StoredValues.push_back(StoreR->getStoredValue()); 9356 } 9357 9358 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9359 Recipe->getMask()); 9360 VPIG->insertBefore(Recipe); 9361 unsigned J = 0; 9362 for (unsigned i = 0; i < IG->getFactor(); ++i) 9363 if (Instruction *Member = IG->getMember(i)) { 9364 if (!Member->getType()->isVoidTy()) { 9365 VPValue *OriginalV = Plan->getVPValue(Member); 9366 Plan->removeVPValueFor(Member); 9367 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9368 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9369 J++; 9370 } 9371 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9372 } 9373 } 9374 9375 // Adjust the recipes for any inloop reductions. 9376 adjustRecipesForInLoopReductions(Plan, RecipeBuilder, Range.Start); 9377 9378 // Finally, if tail is folded by masking, introduce selects between the phi 9379 // and the live-out instruction of each reduction, at the end of the latch. 
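  // That is, roughly: select <header mask>, <live-out value>, <phi>, so lanes
  // that are masked off by the folded tail keep the value from the previous
  // iteration instead of a partially updated one.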
9380   if (CM.foldTailByMasking() && !Legal->getReductionVars().empty()) {
9381     Builder.setInsertPoint(VPBB);
9382     auto *Cond = RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9383     for (auto &Reduction : Legal->getReductionVars()) {
9384       if (CM.isInLoopReduction(Reduction.first))
9385         continue;
9386       VPValue *Phi = Plan->getOrAddVPValue(Reduction.first);
9387       VPValue *Red = Plan->getOrAddVPValue(Reduction.second.getLoopExitInstr());
9388       Builder.createNaryOp(Instruction::Select, {Cond, Red, Phi});
9389     }
9390   }
9391
9392   VPlanTransforms::sinkScalarOperands(*Plan);
9393   VPlanTransforms::mergeReplicateRegions(*Plan);
9394
9395   std::string PlanName;
9396   raw_string_ostream RSO(PlanName);
9397   ElementCount VF = Range.Start;
9398   Plan->addVF(VF);
9399   RSO << "Initial VPlan for VF={" << VF;
9400   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9401     Plan->addVF(VF);
9402     RSO << "," << VF;
9403   }
9404   RSO << "},UF>=1";
9405   RSO.flush();
9406   Plan->setName(PlanName);
9407
9408   return Plan;
9409 }
9410
9411 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9412   // Outer loop handling: outer loops may require CFG and instruction level
9413   // transformations before even evaluating whether vectorization is profitable.
9414   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9415   // the vectorization pipeline.
9416   assert(!OrigLoop->isInnermost());
9417   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9418
9419   // Create new empty VPlan
9420   auto Plan = std::make_unique<VPlan>();
9421
9422   // Build hierarchical CFG
9423   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9424   HCFGBuilder.buildHierarchicalCFG();
9425
9426   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9427        VF *= 2)
9428     Plan->addVF(VF);
9429
9430   if (EnableVPlanPredication) {
9431     VPlanPredicator VPP(*Plan);
9432     VPP.predicate();
9433
9434     // Avoid running transformation to recipes until masked code generation in
9435     // VPlan-native path is in place.
9436     return Plan;
9437   }
9438
9439   SmallPtrSet<Instruction *, 1> DeadInstructions;
9440   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9441                                              Legal->getInductionVars(),
9442                                              DeadInstructions, *PSE.getSE());
9443   return Plan;
9444 }
9445
9446 // Adjust the recipes for any inloop reductions. The chain of instructions
9447 // leading from the loop exit instr to the phi needs to be converted to
9448 // reductions, with one operand being vector and the other being the scalar
9449 // reduction chain.
9450 void LoopVectorizationPlanner::adjustRecipesForInLoopReductions(
9451     VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, ElementCount MinVF) {
9452   for (auto &Reduction : CM.getInLoopReductionChains()) {
9453     PHINode *Phi = Reduction.first;
9454     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9455     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9456
9457     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9458       continue;
9459
9460     // ReductionOperations are ordered top-down from the phi's use to the
9461     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9462     // which of the two operands will remain scalar and which will be reduced.
9463     // For minmax the chain will be the select instructions.
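    // For example, an in-loop integer add reduction 'add %chain, %widened.op'
    // becomes a VPReductionRecipe that reduces %widened.op to a scalar and adds
    // the result to the incoming chain value.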
9464 Instruction *Chain = Phi; 9465 for (Instruction *R : ReductionOperations) { 9466 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9467 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9468 9469 VPValue *ChainOp = Plan->getVPValue(Chain); 9470 unsigned FirstOpId; 9471 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9472 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9473 "Expected to replace a VPWidenSelectSC"); 9474 FirstOpId = 1; 9475 } else { 9476 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) && 9477 "Expected to replace a VPWidenSC"); 9478 FirstOpId = 0; 9479 } 9480 unsigned VecOpId = 9481 R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId; 9482 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9483 9484 auto *CondOp = CM.foldTailByMasking() 9485 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9486 : nullptr; 9487 VPReductionRecipe *RedRecipe = new VPReductionRecipe( 9488 &RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9489 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9490 Plan->removeVPValueFor(R); 9491 Plan->addVPValue(R, RedRecipe); 9492 WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator()); 9493 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9494 WidenRecipe->eraseFromParent(); 9495 9496 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9497 VPRecipeBase *CompareRecipe = 9498 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9499 assert(isa<VPWidenRecipe>(CompareRecipe) && 9500 "Expected to replace a VPWidenSC"); 9501 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9502 "Expected no remaining users"); 9503 CompareRecipe->eraseFromParent(); 9504 } 9505 Chain = R; 9506 } 9507 } 9508 } 9509 9510 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9511 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9512 VPSlotTracker &SlotTracker) const { 9513 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9514 IG->getInsertPos()->printAsOperand(O, false); 9515 O << ", "; 9516 getAddr()->printAsOperand(O, SlotTracker); 9517 VPValue *Mask = getMask(); 9518 if (Mask) { 9519 O << ", "; 9520 Mask->printAsOperand(O, SlotTracker); 9521 } 9522 for (unsigned i = 0; i < IG->getFactor(); ++i) 9523 if (Instruction *I = IG->getMember(i)) 9524 O << "\n" << Indent << " " << VPlanIngredient(I) << " " << i; 9525 } 9526 #endif 9527 9528 void VPWidenCallRecipe::execute(VPTransformState &State) { 9529 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9530 *this, State); 9531 } 9532 9533 void VPWidenSelectRecipe::execute(VPTransformState &State) { 9534 State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()), 9535 this, *this, InvariantCond, State); 9536 } 9537 9538 void VPWidenRecipe::execute(VPTransformState &State) { 9539 State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State); 9540 } 9541 9542 void VPWidenGEPRecipe::execute(VPTransformState &State) { 9543 State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this, 9544 *this, State.UF, State.VF, IsPtrLoopInvariant, 9545 IsIndexLoopInvariant, State); 9546 } 9547 9548 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9549 assert(!State.Instance && "Int or FP induction being replicated."); 9550 State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(), 9551 getTruncInst(), getVPValue(0), 9552 getCastValue(), State); 9553 } 9554 9555 void 
VPWidenPHIRecipe::execute(VPTransformState &State) { 9556 State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this, 9557 State); 9558 } 9559 9560 void VPBlendRecipe::execute(VPTransformState &State) { 9561 State.ILV->setDebugLocFromInst(Phi, &State.Builder); 9562 // We know that all PHIs in non-header blocks are converted into 9563 // selects, so we don't have to worry about the insertion order and we 9564 // can just use the builder. 9565 // At this point we generate the predication tree. There may be 9566 // duplications since this is a simple recursive scan, but future 9567 // optimizations will clean it up. 9568 9569 unsigned NumIncoming = getNumIncomingValues(); 9570 9571 // Generate a sequence of selects of the form: 9572 // SELECT(Mask3, In3, 9573 // SELECT(Mask2, In2, 9574 // SELECT(Mask1, In1, 9575 // In0))) 9576 // Note that Mask0 is never used: lanes for which no path reaches this phi and 9577 // are essentially undef are taken from In0. 9578 InnerLoopVectorizer::VectorParts Entry(State.UF); 9579 for (unsigned In = 0; In < NumIncoming; ++In) { 9580 for (unsigned Part = 0; Part < State.UF; ++Part) { 9581 // We might have single edge PHIs (blocks) - use an identity 9582 // 'select' for the first PHI operand. 9583 Value *In0 = State.get(getIncomingValue(In), Part); 9584 if (In == 0) 9585 Entry[Part] = In0; // Initialize with the first incoming value. 9586 else { 9587 // Select between the current value and the previous incoming edge 9588 // based on the incoming mask. 9589 Value *Cond = State.get(getMask(In), Part); 9590 Entry[Part] = 9591 State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi"); 9592 } 9593 } 9594 } 9595 for (unsigned Part = 0; Part < State.UF; ++Part) 9596 State.set(this, Entry[Part], Part); 9597 } 9598 9599 void VPInterleaveRecipe::execute(VPTransformState &State) { 9600 assert(!State.Instance && "Interleave group being replicated."); 9601 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9602 getStoredValues(), getMask()); 9603 } 9604 9605 void VPReductionRecipe::execute(VPTransformState &State) { 9606 assert(!State.Instance && "Reduction being replicated."); 9607 Value *PrevInChain = State.get(getChainOp(), 0); 9608 for (unsigned Part = 0; Part < State.UF; ++Part) { 9609 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9610 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9611 Value *NewVecOp = State.get(getVecOp(), Part); 9612 if (VPValue *Cond = getCondOp()) { 9613 Value *NewCond = State.get(Cond, Part); 9614 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9615 Constant *Iden = RecurrenceDescriptor::getRecurrenceIdentity( 9616 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9617 Constant *IdenVec = 9618 ConstantVector::getSplat(VecTy->getElementCount(), Iden); 9619 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9620 NewVecOp = Select; 9621 } 9622 Value *NewRed; 9623 Value *NextInChain; 9624 if (IsOrdered) { 9625 if (State.VF.isVector()) 9626 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9627 PrevInChain); 9628 else 9629 NewRed = State.Builder.CreateBinOp( 9630 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), 9631 PrevInChain, NewVecOp); 9632 PrevInChain = NewRed; 9633 } else { 9634 PrevInChain = State.get(getChainOp(), Part); 9635 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9636 } 9637 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9638 NextInChain = 9639 createMinMaxOp(State.Builder, 
RdxDesc->getRecurrenceKind(), 9640 NewRed, PrevInChain); 9641 } else if (IsOrdered) 9642 NextInChain = NewRed; 9643 else { 9644 NextInChain = State.Builder.CreateBinOp( 9645 (Instruction::BinaryOps)getUnderlyingInstr()->getOpcode(), NewRed, 9646 PrevInChain); 9647 } 9648 State.set(this, NextInChain, Part); 9649 } 9650 } 9651 9652 void VPReplicateRecipe::execute(VPTransformState &State) { 9653 if (State.Instance) { // Generate a single instance. 9654 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9655 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9656 *State.Instance, IsPredicated, State); 9657 // Insert scalar instance packing it into a vector. 9658 if (AlsoPack && State.VF.isVector()) { 9659 // If we're constructing lane 0, initialize to start from poison. 9660 if (State.Instance->Lane.isFirstLane()) { 9661 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9662 Value *Poison = PoisonValue::get( 9663 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9664 State.set(this, Poison, State.Instance->Part); 9665 } 9666 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9667 } 9668 return; 9669 } 9670 9671 // Generate scalar instances for all VF lanes of all UF parts, unless the 9672 // instruction is uniform inwhich case generate only the first lane for each 9673 // of the UF parts. 9674 unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue(); 9675 assert((!State.VF.isScalable() || IsUniform) && 9676 "Can't scalarize a scalable vector"); 9677 for (unsigned Part = 0; Part < State.UF; ++Part) 9678 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9679 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this, 9680 VPIteration(Part, Lane), IsPredicated, 9681 State); 9682 } 9683 9684 void VPBranchOnMaskRecipe::execute(VPTransformState &State) { 9685 assert(State.Instance && "Branch on Mask works only on single instance."); 9686 9687 unsigned Part = State.Instance->Part; 9688 unsigned Lane = State.Instance->Lane.getKnownLane(); 9689 9690 Value *ConditionBit = nullptr; 9691 VPValue *BlockInMask = getMask(); 9692 if (BlockInMask) { 9693 ConditionBit = State.get(BlockInMask, Part); 9694 if (ConditionBit->getType()->isVectorTy()) 9695 ConditionBit = State.Builder.CreateExtractElement( 9696 ConditionBit, State.Builder.getInt32(Lane)); 9697 } else // Block in mask is all-one. 9698 ConditionBit = State.Builder.getTrue(); 9699 9700 // Replace the temporary unreachable terminator with a new conditional branch, 9701 // whose two destinations will be set later when they are created. 
9702 auto *CurrentTerminator = State.CFG.PrevBB->getTerminator(); 9703 assert(isa<UnreachableInst>(CurrentTerminator) && 9704 "Expected to replace unreachable terminator with conditional branch."); 9705 auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit); 9706 CondBr->setSuccessor(0, nullptr); 9707 ReplaceInstWithInst(CurrentTerminator, CondBr); 9708 } 9709 9710 void VPPredInstPHIRecipe::execute(VPTransformState &State) { 9711 assert(State.Instance && "Predicated instruction PHI works per instance."); 9712 Instruction *ScalarPredInst = 9713 cast<Instruction>(State.get(getOperand(0), *State.Instance)); 9714 BasicBlock *PredicatedBB = ScalarPredInst->getParent(); 9715 BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor(); 9716 assert(PredicatingBB && "Predicated block has no single predecessor."); 9717 assert(isa<VPReplicateRecipe>(getOperand(0)) && 9718 "operand must be VPReplicateRecipe"); 9719 9720 // By current pack/unpack logic we need to generate only a single phi node: if 9721 // a vector value for the predicated instruction exists at this point it means 9722 // the instruction has vector users only, and a phi for the vector value is 9723 // needed. In this case the recipe of the predicated instruction is marked to 9724 // also do that packing, thereby "hoisting" the insert-element sequence. 9725 // Otherwise, a phi node for the scalar value is needed. 9726 unsigned Part = State.Instance->Part; 9727 if (State.hasVectorValue(getOperand(0), Part)) { 9728 Value *VectorValue = State.get(getOperand(0), Part); 9729 InsertElementInst *IEI = cast<InsertElementInst>(VectorValue); 9730 PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2); 9731 VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector. 9732 VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element. 9733 if (State.hasVectorValue(this, Part)) 9734 State.reset(this, VPhi, Part); 9735 else 9736 State.set(this, VPhi, Part); 9737 // NOTE: Currently we need to update the value of the operand, so the next 9738 // predicated iteration inserts its generated value in the correct vector. 9739 State.reset(getOperand(0), VPhi, Part); 9740 } else { 9741 Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType(); 9742 PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2); 9743 Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()), 9744 PredicatingBB); 9745 Phi->addIncoming(ScalarPredInst, PredicatedBB); 9746 if (State.hasScalarValue(this, *State.Instance)) 9747 State.reset(this, Phi, *State.Instance); 9748 else 9749 State.set(this, Phi, *State.Instance); 9750 // NOTE: Currently we need to update the value of the operand, so the next 9751 // predicated iteration inserts its generated value in the correct vector. 9752 State.reset(getOperand(0), Phi, *State.Instance); 9753 } 9754 } 9755 9756 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9757 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9758 State.ILV->vectorizeMemoryInstruction( 9759 &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(), 9760 StoredValue, getMask()); 9761 } 9762 9763 // Determine how to lower the scalar epilogue, which depends on 1) optimising 9764 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing 9765 // predication, and 4) a TTI hook that analyses whether the loop is suitable 9766 // for predication. 
9767 static ScalarEpilogueLowering getScalarEpilogueLowering( 9768 Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI, 9769 BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI, 9770 AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, 9771 LoopVectorizationLegality &LVL) { 9772 // 1) OptSize takes precedence over all other options, i.e. if this is set, 9773 // don't look at hints or options, and don't request a scalar epilogue. 9774 // (For PGSO, as shouldOptimizeForSize isn't currently accessible from 9775 // LoopAccessInfo (due to code dependency and not being able to reliably get 9776 // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection 9777 // of strides in LoopAccessInfo::analyzeLoop() and vectorize without 9778 // versioning when the vectorization is forced, unlike hasOptSize. So revert 9779 // back to the old way and vectorize with versioning when forced. See D81345.) 9780 if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI, 9781 PGSOQueryType::IRPass) && 9782 Hints.getForce() != LoopVectorizeHints::FK_Enabled)) 9783 return CM_ScalarEpilogueNotAllowedOptSize; 9784 9785 // 2) If set, obey the directives 9786 if (PreferPredicateOverEpilogue.getNumOccurrences()) { 9787 switch (PreferPredicateOverEpilogue) { 9788 case PreferPredicateTy::ScalarEpilogue: 9789 return CM_ScalarEpilogueAllowed; 9790 case PreferPredicateTy::PredicateElseScalarEpilogue: 9791 return CM_ScalarEpilogueNotNeededUsePredicate; 9792 case PreferPredicateTy::PredicateOrDontVectorize: 9793 return CM_ScalarEpilogueNotAllowedUsePredicate; 9794 }; 9795 } 9796 9797 // 3) If set, obey the hints 9798 switch (Hints.getPredicate()) { 9799 case LoopVectorizeHints::FK_Enabled: 9800 return CM_ScalarEpilogueNotNeededUsePredicate; 9801 case LoopVectorizeHints::FK_Disabled: 9802 return CM_ScalarEpilogueAllowed; 9803 }; 9804 9805 // 4) if the TTI hook indicates this is profitable, request predication. 9806 if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, 9807 LVL.getLAI())) 9808 return CM_ScalarEpilogueNotNeededUsePredicate; 9809 9810 return CM_ScalarEpilogueAllowed; 9811 } 9812 9813 Value *VPTransformState::get(VPValue *Def, unsigned Part) { 9814 // If Values have been set for this Def return the one relevant for \p Part. 9815 if (hasVectorValue(Def, Part)) 9816 return Data.PerPartOutput[Def][Part]; 9817 9818 if (!hasScalarValue(Def, {Part, 0})) { 9819 Value *IRV = Def->getLiveInIRValue(); 9820 Value *B = ILV->getBroadcastInstrs(IRV); 9821 set(Def, B, Part); 9822 return B; 9823 } 9824 9825 Value *ScalarValue = get(Def, {Part, 0}); 9826 // If we aren't vectorizing, we can just copy the scalar map values over 9827 // to the vector map. 9828 if (VF.isScalar()) { 9829 set(Def, ScalarValue, Part); 9830 return ScalarValue; 9831 } 9832 9833 auto *RepR = dyn_cast<VPReplicateRecipe>(Def); 9834 bool IsUniform = RepR && RepR->isUniform(); 9835 9836 unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1; 9837 // Check if there is a scalar value for the selected lane. 9838 if (!hasScalarValue(Def, {Part, LastLane})) { 9839 // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform. 
9840 assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) && 9841 "unexpected recipe found to be invariant"); 9842 IsUniform = true; 9843 LastLane = 0; 9844 } 9845 9846 auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane})); 9847 // Set the insert point after the last scalarized instruction or after the 9848 // last PHI, if LastInst is a PHI. This ensures the insertelement sequence 9849 // will directly follow the scalar definitions. 9850 auto OldIP = Builder.saveIP(); 9851 auto NewIP = 9852 isa<PHINode>(LastInst) 9853 ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI()) 9854 : std::next(BasicBlock::iterator(LastInst)); 9855 Builder.SetInsertPoint(&*NewIP); 9856 9857 // However, if we are vectorizing, we need to construct the vector values. 9858 // If the value is known to be uniform after vectorization, we can just 9859 // broadcast the scalar value corresponding to lane zero for each unroll 9860 // iteration. Otherwise, we construct the vector values using 9861 // insertelement instructions. Since the resulting vectors are stored in 9862 // State, we will only generate the insertelements once. 9863 Value *VectorValue = nullptr; 9864 if (IsUniform) { 9865 VectorValue = ILV->getBroadcastInstrs(ScalarValue); 9866 set(Def, VectorValue, Part); 9867 } else { 9868 // Initialize packing with insertelements to start from undef. 9869 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 9870 Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF)); 9871 set(Def, Undef, Part); 9872 for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane) 9873 ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this); 9874 VectorValue = get(Def, Part); 9875 } 9876 Builder.restoreIP(OldIP); 9877 return VectorValue; 9878 } 9879 9880 // Process the loop in the VPlan-native vectorization path. This path builds 9881 // VPlan upfront in the vectorization pipeline, which allows to apply 9882 // VPlan-to-VPlan transformations from the very beginning without modifying the 9883 // input LLVM IR. 9884 static bool processLoopInVPlanNativePath( 9885 Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT, 9886 LoopVectorizationLegality *LVL, TargetTransformInfo *TTI, 9887 TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, 9888 OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI, 9889 ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints, 9890 LoopVectorizationRequirements &Requirements) { 9891 9892 if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) { 9893 LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n"); 9894 return false; 9895 } 9896 assert(EnableVPlanNativePath && "VPlan-native path is disabled."); 9897 Function *F = L->getHeader()->getParent(); 9898 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI()); 9899 9900 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 9901 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL); 9902 9903 LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F, 9904 &Hints, IAI); 9905 // Use the planner for outer loop vectorization. 9906 // TODO: CM is not used at this point inside the planner. Turn CM into an 9907 // optional argument if we don't need it in the future. 9908 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, 9909 Requirements, ORE); 9910 9911 // Get user vectorization factor. 
9912 ElementCount UserVF = Hints.getWidth(); 9913 9914 CM.collectElementTypesForWidening(); 9915 9916 // Plan how to best vectorize, return the best VF and its cost. 9917 const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF); 9918 9919 // If we are stress testing VPlan builds, do not attempt to generate vector 9920 // code. Masked vector code generation support will follow soon. 9921 // Also, do not attempt to vectorize if no vector code will be produced. 9922 if (VPlanBuildStressTest || EnableVPlanPredication || 9923 VectorizationFactor::Disabled() == VF) 9924 return false; 9925 9926 LVP.setBestPlan(VF.Width, 1); 9927 9928 { 9929 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 9930 F->getParent()->getDataLayout()); 9931 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL, 9932 &CM, BFI, PSI, Checks); 9933 LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \"" 9934 << L->getHeader()->getParent()->getName() << "\"\n"); 9935 LVP.executePlan(LB, DT); 9936 } 9937 9938 // Mark the loop as already vectorized to avoid vectorizing again. 9939 Hints.setAlreadyVectorized(); 9940 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 9941 return true; 9942 } 9943 9944 // Emit a remark if there are stores to floats that required a floating point 9945 // extension. If the vectorized loop was generated with floating point there 9946 // will be a performance penalty from the conversion overhead and the change in 9947 // the vector width. 9948 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) { 9949 SmallVector<Instruction *, 4> Worklist; 9950 for (BasicBlock *BB : L->getBlocks()) { 9951 for (Instruction &Inst : *BB) { 9952 if (auto *S = dyn_cast<StoreInst>(&Inst)) { 9953 if (S->getValueOperand()->getType()->isFloatTy()) 9954 Worklist.push_back(S); 9955 } 9956 } 9957 } 9958 9959 // Traverse the floating point stores upwards searching, for floating point 9960 // conversions. 9961 SmallPtrSet<const Instruction *, 4> Visited; 9962 SmallPtrSet<const Instruction *, 4> EmittedRemark; 9963 while (!Worklist.empty()) { 9964 auto *I = Worklist.pop_back_val(); 9965 if (!L->contains(I)) 9966 continue; 9967 if (!Visited.insert(I).second) 9968 continue; 9969 9970 // Emit a remark if the floating point store required a floating 9971 // point conversion. 9972 // TODO: More work could be done to identify the root cause such as a 9973 // constant or a function return type and point the user to it. 9974 if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second) 9975 ORE->emit([&]() { 9976 return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision", 9977 I->getDebugLoc(), L->getHeader()) 9978 << "floating point conversion changes vector width. " 9979 << "Mixed floating point precision requires an up/down " 9980 << "cast that will negatively impact performance."; 9981 }); 9982 9983 for (Use &Op : I->operands()) 9984 if (auto *OpI = dyn_cast<Instruction>(Op)) 9985 Worklist.push_back(OpI); 9986 } 9987 } 9988 9989 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts) 9990 : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced || 9991 !EnableLoopInterleaving), 9992 VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced || 9993 !EnableLoopVectorization) {} 9994 9995 bool LoopVectorizePass::processLoop(Loop *L) { 9996 assert((EnableVPlanNativePath || L->isInnermost()) && 9997 "VPlan-native path is not enabled. 
Only process inner loops."); 9998 9999 #ifndef NDEBUG 10000 const std::string DebugLocStr = getDebugLocString(L); 10001 #endif /* NDEBUG */ 10002 10003 LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \"" 10004 << L->getHeader()->getParent()->getName() << "\" from " 10005 << DebugLocStr << "\n"); 10006 10007 LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE); 10008 10009 LLVM_DEBUG( 10010 dbgs() << "LV: Loop hints:" 10011 << " force=" 10012 << (Hints.getForce() == LoopVectorizeHints::FK_Disabled 10013 ? "disabled" 10014 : (Hints.getForce() == LoopVectorizeHints::FK_Enabled 10015 ? "enabled" 10016 : "?")) 10017 << " width=" << Hints.getWidth() 10018 << " interleave=" << Hints.getInterleave() << "\n"); 10019 10020 // Function containing loop 10021 Function *F = L->getHeader()->getParent(); 10022 10023 // Looking at the diagnostic output is the only way to determine if a loop 10024 // was vectorized (other than looking at the IR or machine code), so it 10025 // is important to generate an optimization remark for each loop. Most of 10026 // these messages are generated as OptimizationRemarkAnalysis. Remarks 10027 // generated as OptimizationRemark and OptimizationRemarkMissed are 10028 // less verbose reporting vectorized loops and unvectorized loops that may 10029 // benefit from vectorization, respectively. 10030 10031 if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) { 10032 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n"); 10033 return false; 10034 } 10035 10036 PredicatedScalarEvolution PSE(*SE, *L); 10037 10038 // Check if it is legal to vectorize the loop. 10039 LoopVectorizationRequirements Requirements; 10040 LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE, 10041 &Requirements, &Hints, DB, AC, BFI, PSI); 10042 if (!LVL.canVectorize(EnableVPlanNativePath)) { 10043 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); 10044 Hints.emitRemarkWithHints(); 10045 return false; 10046 } 10047 10048 // Check the function attributes and profiles to find out if this function 10049 // should be optimized for size. 10050 ScalarEpilogueLowering SEL = getScalarEpilogueLowering( 10051 F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL); 10052 10053 // Entrance to the VPlan-native vectorization path. Outer loops are processed 10054 // here. They may require CFG and instruction level transformations before 10055 // even evaluating whether vectorization is profitable. Since we cannot modify 10056 // the incoming IR, we need to build VPlan upfront in the vectorization 10057 // pipeline. 10058 if (!L->isInnermost()) 10059 return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC, 10060 ORE, BFI, PSI, Hints, Requirements); 10061 10062 assert(L->isInnermost() && "Inner loop expected."); 10063 10064 // Check the loop for a trip count threshold: vectorize loops with a tiny trip 10065 // count by optimizing for size, to minimize overheads. 10066 auto ExpectedTC = getSmallBestKnownTC(*SE, L); 10067 if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) { 10068 LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. 
" 10069 << "This loop is worth vectorizing only if no scalar " 10070 << "iteration overheads are incurred."); 10071 if (Hints.getForce() == LoopVectorizeHints::FK_Enabled) 10072 LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n"); 10073 else { 10074 LLVM_DEBUG(dbgs() << "\n"); 10075 SEL = CM_ScalarEpilogueNotAllowedLowTripLoop; 10076 } 10077 } 10078 10079 // Check the function attributes to see if implicit floats are allowed. 10080 // FIXME: This check doesn't seem possibly correct -- what if the loop is 10081 // an integer loop and the vector instructions selected are purely integer 10082 // vector instructions? 10083 if (F->hasFnAttribute(Attribute::NoImplicitFloat)) { 10084 reportVectorizationFailure( 10085 "Can't vectorize when the NoImplicitFloat attribute is used", 10086 "loop not vectorized due to NoImplicitFloat attribute", 10087 "NoImplicitFloat", ORE, L); 10088 Hints.emitRemarkWithHints(); 10089 return false; 10090 } 10091 10092 // Check if the target supports potentially unsafe FP vectorization. 10093 // FIXME: Add a check for the type of safety issue (denormal, signaling) 10094 // for the target we're vectorizing for, to make sure none of the 10095 // additional fp-math flags can help. 10096 if (Hints.isPotentiallyUnsafe() && 10097 TTI->isFPVectorizationPotentiallyUnsafe()) { 10098 reportVectorizationFailure( 10099 "Potentially unsafe FP op prevents vectorization", 10100 "loop not vectorized due to unsafe FP support.", 10101 "UnsafeFP", ORE, L); 10102 Hints.emitRemarkWithHints(); 10103 return false; 10104 } 10105 10106 if (!LVL.canVectorizeFPMath(EnableStrictReductions)) { 10107 ORE->emit([&]() { 10108 auto *ExactFPMathInst = Requirements.getExactFPInst(); 10109 return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps", 10110 ExactFPMathInst->getDebugLoc(), 10111 ExactFPMathInst->getParent()) 10112 << "loop not vectorized: cannot prove it is safe to reorder " 10113 "floating-point operations"; 10114 }); 10115 LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to " 10116 "reorder floating-point operations\n"); 10117 Hints.emitRemarkWithHints(); 10118 return false; 10119 } 10120 10121 bool UseInterleaved = TTI->enableInterleavedAccessVectorization(); 10122 InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI()); 10123 10124 // If an override option has been passed in for interleaved accesses, use it. 10125 if (EnableInterleavedMemAccesses.getNumOccurrences() > 0) 10126 UseInterleaved = EnableInterleavedMemAccesses; 10127 10128 // Analyze interleaved memory accesses. 10129 if (UseInterleaved) { 10130 IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI)); 10131 } 10132 10133 // Use the cost model. 10134 LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE, 10135 F, &Hints, IAI); 10136 CM.collectValuesToIgnore(); 10137 CM.collectElementTypesForWidening(); 10138 10139 // Use the planner for vectorization. 10140 LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, 10141 Requirements, ORE); 10142 10143 // Get user vectorization factor and interleave count. 10144 ElementCount UserVF = Hints.getWidth(); 10145 unsigned UserIC = Hints.getInterleave(); 10146 10147 // Plan how to best vectorize, return the best VF and its cost. 10148 Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC); 10149 10150 VectorizationFactor VF = VectorizationFactor::Disabled(); 10151 unsigned IC = 1; 10152 10153 if (MaybeVF) { 10154 VF = *MaybeVF; 10155 // Select the interleave count. 
10156 IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue()); 10157 } 10158 10159 // Identify the diagnostic messages that should be produced. 10160 std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg; 10161 bool VectorizeLoop = true, InterleaveLoop = true; 10162 if (VF.Width.isScalar()) { 10163 LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); 10164 VecDiagMsg = std::make_pair( 10165 "VectorizationNotBeneficial", 10166 "the cost-model indicates that vectorization is not beneficial"); 10167 VectorizeLoop = false; 10168 } 10169 10170 if (!MaybeVF && UserIC > 1) { 10171 // Tell the user interleaving was avoided up-front, despite being explicitly 10172 // requested. 10173 LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and " 10174 "interleaving should be avoided up front\n"); 10175 IntDiagMsg = std::make_pair( 10176 "InterleavingAvoided", 10177 "Ignoring UserIC, because interleaving was avoided up front"); 10178 InterleaveLoop = false; 10179 } else if (IC == 1 && UserIC <= 1) { 10180 // Tell the user interleaving is not beneficial. 10181 LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n"); 10182 IntDiagMsg = std::make_pair( 10183 "InterleavingNotBeneficial", 10184 "the cost-model indicates that interleaving is not beneficial"); 10185 InterleaveLoop = false; 10186 if (UserIC == 1) { 10187 IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled"; 10188 IntDiagMsg.second += 10189 " and is explicitly disabled or interleave count is set to 1"; 10190 } 10191 } else if (IC > 1 && UserIC == 1) { 10192 // Tell the user interleaving is beneficial, but it explicitly disabled. 10193 LLVM_DEBUG( 10194 dbgs() << "LV: Interleaving is beneficial but is explicitly disabled."); 10195 IntDiagMsg = std::make_pair( 10196 "InterleavingBeneficialButDisabled", 10197 "the cost-model indicates that interleaving is beneficial " 10198 "but is explicitly disabled or interleave count is set to 1"); 10199 InterleaveLoop = false; 10200 } 10201 10202 // Override IC if user provided an interleave count. 10203 IC = UserIC > 0 ? UserIC : IC; 10204 10205 // Emit diagnostic messages, if any. 10206 const char *VAPassName = Hints.vectorizeAnalysisPassName(); 10207 if (!VectorizeLoop && !InterleaveLoop) { 10208 // Do not vectorize or interleaving the loop. 
10209 ORE->emit([&]() { 10210 return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first, 10211 L->getStartLoc(), L->getHeader()) 10212 << VecDiagMsg.second; 10213 }); 10214 ORE->emit([&]() { 10215 return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first, 10216 L->getStartLoc(), L->getHeader()) 10217 << IntDiagMsg.second; 10218 }); 10219 return false; 10220 } else if (!VectorizeLoop && InterleaveLoop) { 10221 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10222 ORE->emit([&]() { 10223 return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first, 10224 L->getStartLoc(), L->getHeader()) 10225 << VecDiagMsg.second; 10226 }); 10227 } else if (VectorizeLoop && !InterleaveLoop) { 10228 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10229 << ") in " << DebugLocStr << '\n'); 10230 ORE->emit([&]() { 10231 return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first, 10232 L->getStartLoc(), L->getHeader()) 10233 << IntDiagMsg.second; 10234 }); 10235 } else if (VectorizeLoop && InterleaveLoop) { 10236 LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width 10237 << ") in " << DebugLocStr << '\n'); 10238 LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n'); 10239 } 10240 10241 bool DisableRuntimeUnroll = false; 10242 MDNode *OrigLoopID = L->getLoopID(); 10243 { 10244 // Optimistically generate runtime checks. Drop them if they turn out to not 10245 // be profitable. Limit the scope of Checks, so the cleanup happens 10246 // immediately after vector codegeneration is done. 10247 GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, 10248 F->getParent()->getDataLayout()); 10249 if (!VF.Width.isScalar() || IC > 1) 10250 Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate()); 10251 LVP.setBestPlan(VF.Width, IC); 10252 10253 using namespace ore; 10254 if (!VectorizeLoop) { 10255 assert(IC > 1 && "interleave count should not be 1 or 0"); 10256 // If we decided that it is not legal to vectorize the loop, then 10257 // interleave it. 10258 InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL, 10259 &CM, BFI, PSI, Checks); 10260 LVP.executePlan(Unroller, DT); 10261 10262 ORE->emit([&]() { 10263 return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(), 10264 L->getHeader()) 10265 << "interleaved loop (interleaved count: " 10266 << NV("InterleaveCount", IC) << ")"; 10267 }); 10268 } else { 10269 // If we decided that it is *legal* to vectorize the loop, then do it. 10270 10271 // Consider vectorizing the epilogue too if it's profitable. 10272 VectorizationFactor EpilogueVF = 10273 CM.selectEpilogueVectorizationFactor(VF.Width, LVP); 10274 if (EpilogueVF.Width.isVector()) { 10275 10276 // The first pass vectorizes the main loop and creates a scalar epilogue 10277 // to be vectorized by executing the plan (potentially with a different 10278 // factor) again shortly afterwards. 10279 EpilogueLoopVectorizationInfo EPI(VF.Width.getKnownMinValue(), IC, 10280 EpilogueVF.Width.getKnownMinValue(), 10281 1); 10282 EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE, 10283 EPI, &LVL, &CM, BFI, PSI, Checks); 10284 10285 LVP.setBestPlan(EPI.MainLoopVF, EPI.MainLoopUF); 10286 LVP.executePlan(MainILV, DT); 10287 ++LoopsVectorized; 10288 10289 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10290 formLCSSARecursively(*L, *DT, LI, SE); 10291 10292 // Second pass vectorizes the epilogue and adjusts the control flow 10293 // edges from the first pass. 
10294 LVP.setBestPlan(EPI.EpilogueVF, EPI.EpilogueUF); 10295 EPI.MainLoopVF = EPI.EpilogueVF; 10296 EPI.MainLoopUF = EPI.EpilogueUF; 10297 EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC, 10298 ORE, EPI, &LVL, &CM, BFI, PSI, 10299 Checks); 10300 LVP.executePlan(EpilogILV, DT); 10301 ++LoopsEpilogueVectorized; 10302 10303 if (!MainILV.areSafetyChecksAdded()) 10304 DisableRuntimeUnroll = true; 10305 } else { 10306 InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC, 10307 &LVL, &CM, BFI, PSI, Checks); 10308 LVP.executePlan(LB, DT); 10309 ++LoopsVectorized; 10310 10311 // Add metadata to disable runtime unrolling a scalar loop when there 10312 // are no runtime checks about strides and memory. A scalar loop that is 10313 // rarely used is not worth unrolling. 10314 if (!LB.areSafetyChecksAdded()) 10315 DisableRuntimeUnroll = true; 10316 } 10317 // Report the vectorization decision. 10318 ORE->emit([&]() { 10319 return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(), 10320 L->getHeader()) 10321 << "vectorized loop (vectorization width: " 10322 << NV("VectorizationFactor", VF.Width) 10323 << ", interleaved count: " << NV("InterleaveCount", IC) << ")"; 10324 }); 10325 } 10326 10327 if (ORE->allowExtraAnalysis(LV_NAME)) 10328 checkMixedPrecision(L, ORE); 10329 } 10330 10331 Optional<MDNode *> RemainderLoopID = 10332 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 10333 LLVMLoopVectorizeFollowupEpilogue}); 10334 if (RemainderLoopID.hasValue()) { 10335 L->setLoopID(RemainderLoopID.getValue()); 10336 } else { 10337 if (DisableRuntimeUnroll) 10338 AddRuntimeUnrollDisableMetaData(L); 10339 10340 // Mark the loop as already vectorized to avoid vectorizing again. 10341 Hints.setAlreadyVectorized(); 10342 } 10343 10344 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs())); 10345 return true; 10346 } 10347 10348 LoopVectorizeResult LoopVectorizePass::runImpl( 10349 Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_, 10350 DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_, 10351 DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_, 10352 std::function<const LoopAccessInfo &(Loop &)> &GetLAA_, 10353 OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) { 10354 SE = &SE_; 10355 LI = &LI_; 10356 TTI = &TTI_; 10357 DT = &DT_; 10358 BFI = &BFI_; 10359 TLI = TLI_; 10360 AA = &AA_; 10361 AC = &AC_; 10362 GetLAA = &GetLAA_; 10363 DB = &DB_; 10364 ORE = &ORE_; 10365 PSI = PSI_; 10366 10367 // Don't attempt if 10368 // 1. the target claims to have no vector registers, and 10369 // 2. interleaving won't help ILP. 10370 // 10371 // The second condition is necessary because, even if the target has no 10372 // vector registers, loop vectorization may still enable scalar 10373 // interleaving. 10374 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) && 10375 TTI->getMaxInterleaveFactor(1) < 2) 10376 return LoopVectorizeResult(false, false); 10377 10378 bool Changed = false, CFGChanged = false; 10379 10380 // The vectorizer requires loops to be in simplified form. 10381 // Since simplification may add new inner loops, it has to run before the 10382 // legality and profitability checks. This means running the loop vectorizer 10383 // will simplify all loops, regardless of whether anything end up being 10384 // vectorized. 
10385 for (auto &L : *LI) 10386 Changed |= CFGChanged |= 10387 simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */); 10388 10389 // Build up a worklist of inner-loops to vectorize. This is necessary as 10390 // the act of vectorizing or partially unrolling a loop creates new loops 10391 // and can invalidate iterators across the loops. 10392 SmallVector<Loop *, 8> Worklist; 10393 10394 for (Loop *L : *LI) 10395 collectSupportedLoops(*L, LI, ORE, Worklist); 10396 10397 LoopsAnalyzed += Worklist.size(); 10398 10399 // Now walk the identified inner loops. 10400 while (!Worklist.empty()) { 10401 Loop *L = Worklist.pop_back_val(); 10402 10403 // For the inner loops we actually process, form LCSSA to simplify the 10404 // transform. 10405 Changed |= formLCSSARecursively(*L, *DT, LI, SE); 10406 10407 Changed |= CFGChanged |= processLoop(L); 10408 } 10409 10410 // Process each loop nest in the function. 10411 return LoopVectorizeResult(Changed, CFGChanged); 10412 } 10413 10414 PreservedAnalyses LoopVectorizePass::run(Function &F, 10415 FunctionAnalysisManager &AM) { 10416 auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F); 10417 auto &LI = AM.getResult<LoopAnalysis>(F); 10418 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 10419 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 10420 auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F); 10421 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 10422 auto &AA = AM.getResult<AAManager>(F); 10423 auto &AC = AM.getResult<AssumptionAnalysis>(F); 10424 auto &DB = AM.getResult<DemandedBitsAnalysis>(F); 10425 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F); 10426 MemorySSA *MSSA = EnableMSSALoopDependency 10427 ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() 10428 : nullptr; 10429 10430 auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager(); 10431 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 10432 [&](Loop &L) -> const LoopAccessInfo & { 10433 LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, 10434 TLI, TTI, nullptr, MSSA}; 10435 return LAM.getResult<LoopAccessAnalysis>(L, AR); 10436 }; 10437 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); 10438 ProfileSummaryInfo *PSI = 10439 MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent()); 10440 LoopVectorizeResult Result = 10441 runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI); 10442 if (!Result.MadeAnyChange) 10443 return PreservedAnalyses::all(); 10444 PreservedAnalyses PA; 10445 10446 // We currently do not preserve loopinfo/dominator analyses with outer loop 10447 // vectorization. Until this is addressed, mark these analyses as preserved 10448 // only for non-VPlan-native path. 10449 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 10450 if (!EnableVPlanNativePath) { 10451 PA.preserve<LoopAnalysis>(); 10452 PA.preserve<DominatorTreeAnalysis>(); 10453 } 10454 if (!Result.MadeCFGChange) 10455 PA.preserveSet<CFGAnalyses>(); 10456 return PA; 10457 } 10458