//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to
// the VPlan infrastructure and to introduce outer loop vectorization support
// (see docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For
// this purpose, we temporarily introduced the VPlan-native vectorization
// path: an alternative vectorization path that is natively implemented on
// top of the VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
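//
// As an illustration (not taken from the references above): with a
// vectorization factor (VF) of 4, a scalar loop such as
//
//   for (i = 0; i < n; ++i)
//     b[i] = a[i] + 42;
//
// is conceptually rewritten so that each 'wide' iteration processes four
// consecutive elements and the index advances by 4 instead of 1; the
// remaining iterations run in a scalar epilogue or a predicated tail.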
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanPredicator.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
"llvm/Transforms/Vectorize/LoopVectorizationLegality.h" 144 #include <algorithm> 145 #include <cassert> 146 #include <cstdint> 147 #include <cstdlib> 148 #include <functional> 149 #include <iterator> 150 #include <limits> 151 #include <memory> 152 #include <string> 153 #include <tuple> 154 #include <utility> 155 156 using namespace llvm; 157 158 #define LV_NAME "loop-vectorize" 159 #define DEBUG_TYPE LV_NAME 160 161 #ifndef NDEBUG 162 const char VerboseDebug[] = DEBUG_TYPE "-verbose"; 163 #endif 164 165 /// @{ 166 /// Metadata attribute names 167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all"; 168 const char LLVMLoopVectorizeFollowupVectorized[] = 169 "llvm.loop.vectorize.followup_vectorized"; 170 const char LLVMLoopVectorizeFollowupEpilogue[] = 171 "llvm.loop.vectorize.followup_epilogue"; 172 /// @} 173 174 STATISTIC(LoopsVectorized, "Number of loops vectorized"); 175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization"); 176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized"); 177 178 static cl::opt<bool> EnableEpilogueVectorization( 179 "enable-epilogue-vectorization", cl::init(true), cl::Hidden, 180 cl::desc("Enable vectorization of epilogue loops.")); 181 182 static cl::opt<unsigned> EpilogueVectorizationForceVF( 183 "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden, 184 cl::desc("When epilogue vectorization is enabled, and a value greater than " 185 "1 is specified, forces the given VF for all applicable epilogue " 186 "loops.")); 187 188 static cl::opt<unsigned> EpilogueVectorizationMinVF( 189 "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden, 190 cl::desc("Only loops with vectorization factor equal to or larger than " 191 "the specified value are considered for epilogue vectorization.")); 192 193 /// Loops with a known constant trip count below this number are vectorized only 194 /// if no scalar iteration overheads are incurred. 195 static cl::opt<unsigned> TinyTripCountVectorThreshold( 196 "vectorizer-min-trip-count", cl::init(16), cl::Hidden, 197 cl::desc("Loops with a constant trip count that is smaller than this " 198 "value are vectorized only if no scalar iteration overheads " 199 "are incurred.")); 200 201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold( 202 "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden, 203 cl::desc("The maximum allowed number of runtime memory checks with a " 204 "vectorize(enable) pragma.")); 205 206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired, 207 // that predication is preferred, and this lists all options. I.e., the 208 // vectorizer will try to fold the tail-loop (epilogue) into the vector body 209 // and predicate the instructions accordingly. 
// The option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; the enum below lists the
// options. I.e., the vectorizer will try to fold the tail loop (epilogue)
// into the vector body and predicate the instructions accordingly. If
// tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefer tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in the loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in a "
             "loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));
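// (Illustration of interleaving: with VF=4 and an interleave factor of 2,
// each wide loop iteration processes 8 elements as two independent
// <4 x ...> operation streams, which helps hide instruction latency and
// saturate multiple execution ports.)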
Mostly " 279 "useful for getting consistent testing.")); 280 281 static cl::opt<bool> ForceTargetSupportsScalableVectors( 282 "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden, 283 cl::desc( 284 "Pretend that scalable vectors are supported, even if the target does " 285 "not support them. This flag should only be used for testing.")); 286 287 static cl::opt<unsigned> SmallLoopCost( 288 "small-loop-cost", cl::init(20), cl::Hidden, 289 cl::desc( 290 "The cost of a loop that is considered 'small' by the interleaver.")); 291 292 static cl::opt<bool> LoopVectorizeWithBlockFrequency( 293 "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden, 294 cl::desc("Enable the use of the block frequency analysis to access PGO " 295 "heuristics minimizing code growth in cold regions and being more " 296 "aggressive in hot regions.")); 297 298 // Runtime interleave loops for load/store throughput. 299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave( 300 "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden, 301 cl::desc( 302 "Enable runtime interleaving until load/store ports are saturated")); 303 304 /// Interleave small loops with scalar reductions. 305 static cl::opt<bool> InterleaveSmallLoopScalarReduction( 306 "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden, 307 cl::desc("Enable interleaving for loops with small iteration counts that " 308 "contain scalar reductions to expose ILP.")); 309 310 /// The number of stores in a loop that are allowed to need predication. 311 static cl::opt<unsigned> NumberOfStoresToPredicate( 312 "vectorize-num-stores-pred", cl::init(1), cl::Hidden, 313 cl::desc("Max number of stores to be predicated behind an if.")); 314 315 static cl::opt<bool> EnableIndVarRegisterHeur( 316 "enable-ind-var-reg-heur", cl::init(true), cl::Hidden, 317 cl::desc("Count the induction variable only once when interleaving")); 318 319 static cl::opt<bool> EnableCondStoresVectorization( 320 "enable-cond-stores-vec", cl::init(true), cl::Hidden, 321 cl::desc("Enable if predication of stores during vectorization.")); 322 323 static cl::opt<unsigned> MaxNestedScalarReductionIC( 324 "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden, 325 cl::desc("The maximum interleave count to use when interleaving a scalar " 326 "reduction in a nested loop.")); 327 328 static cl::opt<bool> 329 PreferInLoopReductions("prefer-inloop-reductions", cl::init(false), 330 cl::Hidden, 331 cl::desc("Prefer in-loop vector reductions, " 332 "overriding the targets preference.")); 333 334 static cl::opt<bool> ForceOrderedReductions( 335 "force-ordered-reductions", cl::init(false), cl::Hidden, 336 cl::desc("Enable the vectorisation of loops with in-order (strict) " 337 "FP reductions")); 338 339 static cl::opt<bool> PreferPredicatedReductionSelect( 340 "prefer-predicated-reduction-select", cl::init(false), cl::Hidden, 341 cl::desc( 342 "Prefer predicating a reduction operation over an after loop select.")); 343 344 cl::opt<bool> EnableVPlanNativePath( 345 "enable-vplan-native-path", cl::init(false), cl::Hidden, 346 cl::desc("Enable VPlan-native vectorization path with " 347 "support for outer loop vectorization.")); 348 349 // FIXME: Remove this switch once we have divergence analysis. Currently we 350 // assume divergent non-backedge branches when this switch is true. 
cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// FIXME: Remove this switch once we have divergence analysis. Currently we
// assume divergent non-backedge branches when this switch is true.
cl::opt<bool> EnableVPlanPredication(
    "enable-vplan-predication", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path predicator with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns the "best known" trip count for the specified loop \p L, as
/// defined by the following procedure:
///   1) Returns the exact trip count if it is known.
///   2) Returns the expected trip count according to profile data, if any.
///   3) Returns an upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
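  // (E.g., a loop driven by an i8 induction variable has a constant maximum
  // trip count even when the actual bound is unknown; that kind of upper
  // bound is what is queried here.)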
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars. This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found for the given vectorization
/// factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop.
  /// In the case of epilogue vectorization, this function is overridden to
  /// handle the more complex control flow around the loops.
  virtual BasicBlock *createVectorizedLoopSkeleton();

  /// Widen a single instruction within the innermost loop.
  void widenInstruction(Instruction &I, VPValue *Def, VPUser &Operands,
                        VPTransformState &State);

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Widen a single select instruction within the innermost loop.
  void widenSelectInstruction(SelectInst &I, VPValue *VPDef, VPUser &Operands,
                              bool InvariantCond, VPTransformState &State);
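  /// (Illustration: with VF=4, a scalar `select i1 %c, i32 %x, i32 %y` is
  /// widened to `select <4 x i1> %C, <4 x i32> %X, <4 x i32> %Y`; a
  /// loop-invariant condition can instead stay scalar, selecting between the
  /// two widened operands as a whole.)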
  /// Fix the vectorized code, taking care of header phis, live-outs, and
  /// more.
  void fixVectorizedLoop(VPTransformState &State);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// Vectorize a single GetElementPtrInst based on information gathered and
  /// decisions taken during planning.
  void widenGEP(GetElementPtrInst *GEP, VPValue *VPDef, VPUser &Indices,
                unsigned UF, ElementCount VF, bool IsPtrLoopInvariant,
                SmallBitVector &IsIndexLoopInvariant, VPTransformState &State);

  /// Vectorize a single first-order recurrence or pointer induction PHINode in
  /// a block. This method handles the induction variable canonicalization. It
  /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
  void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
                           VPTransformState &State);

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop. Generates a scalar instance for the lane and unroll part given by
  /// \p Instance. Uses the VPValue operands from \p Operands instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPValue *Def, VPUser &Operands,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
  /// is provided, the integer induction variable will first be truncated to
  /// the corresponding type.
  void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
                             VPValue *Def, VPValue *CastDef,
                             VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize the interleaved access group \p Group with the base
  /// address given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);
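  /// (Illustration: for a factor-2 group loading A[2*i] and A[2*i+1] with
  /// VF=4, this emits a single wide load of <8 x T> followed by two
  /// shufflevectors that de-interleave the even and odd elements.)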
  /// Vectorize Load and Store instructions with the base address given in \p
  /// Addr, optionally masking the vector operations if \p BlockInMask is
  /// non-null. Use \p State to translate given VPValues to IR values in the
  /// vectorized loop.
  void vectorizeMemoryInstruction(Instruction *Instr, VPTransformState &State,
                                  VPValue *Def, VPValue *Addr,
                                  VPValue *StoredValue, VPValue *BlockInMask,
                                  bool ConsecutiveStride, bool Reverse);

  /// Set the debug location in the builder using the debug location in \p V.
  /// If \p CustomBuilder is None, the class member's Builder is used.
  void setDebugLocFromInst(const Value *V,
                           Optional<IRBuilder<> *> CustomBuilder = None);

  /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
  void fixNonInductionPHIs(VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1,
  /// .... This is needed because each iteration in the loop corresponds to a
  /// SIMD element.
  virtual Value *getBroadcastInstrs(Value *V);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *CountRoundDown, Value *EndValue,
                    BasicBlock *MiddleBlock);

  /// Create a new induction variable inside L.
  PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
                                   Value *Step, Instruction *DL);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
                               VPTransformState &State);

  /// Fixup the LCSSA phi nodes in the unique exit block. This simply
  /// means we need to add the appropriate incoming value from the middle
  /// block as exiting edges from the scalar epilogue loop (if present) are
  /// already in place, and we exit the vector loop exclusively to the middle
  /// block.
  void fixLCSSAPHIs(VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// This function adds
  /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
  /// to each vector element of Val. The sequence starts at StartIdx.
  /// \p Opcode is relevant for FP induction variables.
  virtual Value *
  getStepVector(Value *Val, Value *StartIdx, Value *Step,
                Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd);
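  /// (Illustration: for VF=4, StartIdx=0, and an integer Step, the result is
  /// Val + <0, 1, 2, 3> * Step; for FP inductions the additions use the
  /// operation given by \p Opcode.)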
  /// Compute scalar induction steps. \p ScalarIV is the scalar induction
  /// variable on which to base the steps, \p Step is the size of the step,
  /// and \p EntryVal is the value from the original loop that maps to the
  /// steps. Note that \p EntryVal doesn't have to be an induction variable -
  /// it can also be a truncate instruction.
  void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
                        const InductionDescriptor &ID, VPValue *Def,
                        VPValue *CastDef, VPTransformState &State);

  /// Create a vector induction phi node based on an existing scalar one. \p
  /// EntryVal is the value from the original loop that maps to the vector phi
  /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
  /// truncate instruction, instead of widening the original IV, we widen a
  /// version of the IV truncated to \p EntryVal's type.
  void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
                                       Value *Step, Value *Start,
                                       Instruction *EntryVal, VPValue *Def,
                                       VPValue *CastDef,
                                       VPTransformState &State);

  /// Returns true if an instruction \p I should be scalarized instead of
  /// vectorized for the chosen vectorization factor.
  bool shouldScalarizeInstruction(Instruction *I) const;

  /// Returns true if we should generate a scalar version of \p IV.
  bool needsScalarInduction(Instruction *IV) const;

  /// If there is a cast involved in the induction variable \p ID, which should
  /// be ignored in the vectorized loop body, this function records the
  /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
  /// cast. We have already proven that the casted Phi is equal to the uncasted
  /// Phi in the vectorized loop (under a runtime guard), and therefore
  /// there is no need to vectorize the cast - the same value can be used in
  /// the vector loop for both the Phi and the cast.
  /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
  /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
  ///
  /// \p EntryVal is the value from the original loop that maps to the vector
  /// phi node and is used to distinguish what is the IV currently being
  /// processed - the original one (if \p EntryVal is a phi corresponding to
  /// the original IV) or the "newly-created" one based on the proof mentioned
  /// above (see also buildScalarSteps() and
  /// createVectorIntOrFpInductionPHI()). In the latter case \p EntryVal is a
  /// TruncInst and we must not record anything for that IV, but it's
  /// error-prone to expect callers of this routine to care about that, hence
  /// this explicit parameter.
  void recordVectorLoopValueForInductionCast(
      const InductionDescriptor &ID, const Instruction *EntryVal,
      Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
      unsigned Part, unsigned Lane = UINT_MAX);

  /// Generate a shuffle sequence that will reverse the vector Vec.
  virtual Value *reverseVector(Value *Vec);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(Loop *NewLoop);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(Loop *NewLoop);
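  /// (Illustration: the vector trip count is the original trip count rounded
  /// down to a multiple of VF * UF; e.g. TC=100 with VF=8 and UF=2 gives a
  /// vector trip count of 96, leaving 4 iterations for the scalar epilogue.)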
  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including
  /// if it overflows.
  void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have
  /// been added.
  BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);

  /// Compute the transformed value of Index at offset StartValue using step
  /// StepValue.
  /// For integer induction, returns StartValue + Index * StepValue.
  /// For pointer induction, returns StartValue[Index * StepValue].
  /// FIXME: The newly created binary instructions should contain nsw/nuw
  /// flags, which can be found from the original scalar operations.
  Value *emitTransformedIndex(IRBuilder<> &B, Value *Index,
                              ScalarEvolution *SE, const DataLayout &DL,
                              const InductionDescriptor &ID) const;

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader. Also
  /// allocate a loop object for the new vector loop and return it.
  Loop *createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables so that the scalar
  /// epilogue resumes iteration from where the vectorized loop left off
  /// (given by \p VectorTripCount).
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      Loop *L, Value *VectorTripCount,
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Take in the vector loop \p L as argument, and
  /// return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);

  /// Add additional metadata to \p To that was not present on \p Orig.
  ///
  /// Currently this is used to add the noalias annotations based on the
  /// inserted memchecks. Use this for instructions that are *cloned* into the
  /// vector loop.
  void addNewMetadata(Instruction *To, const Instruction *Orig);

  /// Add metadata from one instruction to another.
  ///
  /// This includes both the original MDs from \p From and additional ones
  /// (\see addNewMetadata). Use this for *newly created* instructions in the
  /// vector loop.
  void addMetadata(Instruction *To, Instruction *From);

  /// Similar to the previous function but it adds the metadata to a
  /// vector of instructions.
  void addMetadata(ArrayRef<Value *> To, Instruction *From);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};
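  /// (Illustration of the skeleton the helpers above produce, with
  /// illustrative block names: the iteration-count and runtime checks branch
  /// either to vector.ph -> vector.body -> middle.block or directly to
  /// scalar.ph; the scalar preheader receives resume phis along the lines of
  ///   %bc.resume.val = phi i64 [ %n.vec, %middle.block ], [ 0, %entry ]
  /// so the epilogue continues where the vector loop stopped.)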
  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// LoopVersioning. It's only set up (non-null) if memchecks were
  /// used.
  ///
  /// This is currently only used to add no-alias metadata based on the
  /// memchecks. The actual versioning is performed manually.
  std::unique_ptr<LoopVersioning> LVer;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use.
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The vector loop body.
  BasicBlock *LoopVectorBody;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// The new Induction variable which was added to the new block.
  PHINode *Induction = nullptr;

  /// The induction variable of the old basic block.
  PHINode *OldInduction = nullptr;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF)).
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  // Vector of original scalar PHIs whose corresponding widened PHIs need to be
  // fixed up at the end of vector code generation.
  SmallVector<PHINode *, 8> OrigPHIsToFix;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
  Value *getStepVector(
      Value *Val, Value *StartIdx, Value *Step,
      Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
  Value *reverseVector(Value *Vec) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, first to set up the
/// skeleton and vectorize the main loop, and second to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
                            Checks),
        EPI(EPI) {}
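  // (Illustration of the resulting control flow, simplified:
  //    main iteration-count check -> main vector loop
  //      -> epilogue iteration-count check -> epilogue vector loop
  //      -> scalar remainder
  //  with bypass edges taken whenever the respective trip count is too
  //  small.)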
  // Override this function to handle the more complex control flow around the
  // three loops.
  BasicBlock *createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC,
                                       ORE, EPI, LVL, CM, BFI, PSI, Check) {}

  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e. the first pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
                                             bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *epilogue* loops in the process of vectorizing loops and
/// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC,
                                       ORE, EPI, LVL, CM, BFI, PSI, Checks) {}

  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
  BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
                                                      BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its
/// operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

void InnerLoopVectorizer::setDebugLocFromInst(
    const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
  IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
  if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
    const DILocation *DIL = Inst->getDebugLoc();

    // When an FSDiscriminator is enabled, we don't need to add the multiply
    // factors to the discriminators.
    if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
        !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
      // FIXME: For scalable vectors, assume vscale=1.
      auto NewDIL =
          DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
      if (NewDIL)
        B->SetCurrentDebugLocation(NewDIL.getValue());
      else
        LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
                          << DIL->getFilename() << " Line: "
                          << DIL->getLine());
    } else
      B->SetCurrentDebugLocation(DIL);
  } else
    B->SetCurrentDebugLocation(DebugLoc());
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed.
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis
createLVAnalysis(const char *PassName, StringRef RemarkName, Loop *TheLoop,
                 Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}
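// (Illustration for the VF helpers below: with a scalable VF of <vscale x 4>
// and Step=2, the emitted step value is vscale * 8, i.e. a multiple of the
// vector length only known at run time; for fixed VFs the step folds to a
// plain constant.)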
/// Return a value for Step multiplied by VF.
static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
                              int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

namespace llvm {

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}

static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(),
                                 FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::addNewMetadata(Instruction *To,
                                         const Instruction *Orig) {
  // If the loop was versioned with memchecks, add the corresponding no-alias
  // metadata.
  if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
    LVer->annotateInstWithNoAlias(To, Orig);
}

void InnerLoopVectorizer::addMetadata(Instruction *To, Instruction *From) {
  propagateMetadata(To, From);
  addNewMetadata(To, From);
}

void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
                                      Instruction *From) {
  for (Value *V : To) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      addMetadata(I, From);
  }
}

namespace llvm {

// Loop vectorization cost-model hints for how the scalar epilogue loop
// should be lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize.
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the
/// expected speedup/slowdowns due to the supported instruction set. We use
/// the TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should
  /// be avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that
  /// VF. This method checks every VF in \p CandidateVFs. If UserVF is not
  /// zero then this vectorization factor will be selected if vectorization
  /// is possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);
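  /// (Illustration: with a main-loop VF of 16 and the default
  /// epilogue-vectorization-minimum-VF of 16, the epilogue may itself be
  /// vectorized with a smaller VF such as 8, so short remainders still run
  /// vector code rather than falling straight to scalar iterations.)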
  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// A memory access instruction may be vectorized in more than one way; the
  /// form it takes after vectorization depends on cost.
  /// This function takes cost-based decisions for Load/Store instructions
  /// and collects them in a map. The resulting decisions map is used for
  /// building the lists of loop-uniform and loop-scalar instructions.
  /// The calculated cost is saved with the widening decision in order to
  /// avoid redundant calculations.
  void setCostBasedWideningDecision(ElementCount VF);

  /// A struct that represents some properties of the register usage
  /// of a loop.
  struct RegisterUsage {
    /// Holds the number of loop invariant values that are used in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
    /// Holds the maximum number of concurrent live intervals in the loop.
    /// The key is ClassID of target-provided register class.
    SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
  };

  /// \return Returns information about the register usages of the loop for
  /// the given vectorization factors.
  SmallVector<RegisterUsage, 8>
  calculateRegisterUsage(ArrayRef<ElementCount> VFs);

  /// Collect values we want to ignore in the cost model.
  void collectValuesToIgnore();

  /// Collect all element types in the loop for which widening is needed.
  void collectElementTypesForWidening();

  /// Split reductions into those that happen in the loop, and those that
  /// happen outside. In-loop reductions are collected into
  /// InLoopReductionChains.
  void collectInLoopReductions();

  /// Returns true if we should use strict in-order reductions for the given
  /// RdxDesc. This is true if the IsOrdered flag of RdxDesc is set and we do
  /// not allow reordering of FP operations.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
    return !Hints->allowReordering() && RdxDesc.isOrdered();
  }

  /// \returns The smallest bitwidth each instruction can be represented with.
  /// The vector equivalents of these instructions should be truncated to this
  /// type.
  const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
    return MinBWs;
  }
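  /// (Illustration: for a strict FP reduction `s += a[i]`, in-order
  /// vectorization preserves the sequential rounding behaviour by folding
  /// each vector of lanes into the running scalar once per vector iteration,
  /// e.g. via an ordered llvm.vector.reduce.fadd with `s` as the start
  /// operand, instead of keeping independent partial sums.)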
1338   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1339     assert(VF.isVector() &&
1340            "Profitable to scalarize relevant only for VF > 1.");
1341
1342     // Cost model is not run in the VPlan-native path - return conservative
1343     // result until this changes.
1344     if (EnableVPlanNativePath)
1345       return false;
1346
1347     auto Scalars = InstsToScalarize.find(VF);
1348     assert(Scalars != InstsToScalarize.end() &&
1349            "VF not yet analyzed for scalarization profitability");
1350     return Scalars->second.find(I) != Scalars->second.end();
1351   }
1352
1353   /// Returns true if \p I is known to be uniform after vectorization.
1354   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1355     if (VF.isScalar())
1356       return true;
1357
1358     // Cost model is not run in the VPlan-native path - return conservative
1359     // result until this changes.
1360     if (EnableVPlanNativePath)
1361       return false;
1362
1363     auto UniformsPerVF = Uniforms.find(VF);
1364     assert(UniformsPerVF != Uniforms.end() &&
1365            "VF not yet analyzed for uniformity");
1366     return UniformsPerVF->second.count(I);
1367   }
1368
1369   /// Returns true if \p I is known to be scalar after vectorization.
1370   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1371     if (VF.isScalar())
1372       return true;
1373
1374     // Cost model is not run in the VPlan-native path - return conservative
1375     // result until this changes.
1376     if (EnableVPlanNativePath)
1377       return false;
1378
1379     auto ScalarsPerVF = Scalars.find(VF);
1380     assert(ScalarsPerVF != Scalars.end() &&
1381            "Scalar values are not calculated for VF");
1382     return ScalarsPerVF->second.count(I);
1383   }
1384
1385   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1386   /// for vectorization factor \p VF.
1387   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1388     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1389            !isProfitableToScalarize(I, VF) &&
1390            !isScalarAfterVectorization(I, VF);
1391   }
1392
1393   /// Decision that was taken during cost calculation for memory instruction.
1394   enum InstWidening {
1395     CM_Unknown,
1396     CM_Widen,         // For consecutive accesses with stride +1.
1397     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1398     CM_Interleave,
1399     CM_GatherScatter,
1400     CM_Scalarize
1401   };
1402
1403   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1404   /// instruction \p I and vector width \p VF.
1405   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1406                            InstructionCost Cost) {
1407     assert(VF.isVector() && "Expected VF >=2");
1408     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1409   }
1410
1411   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1412   /// interleaving group \p Grp and vector width \p VF.
1413   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1414                            ElementCount VF, InstWidening W,
1415                            InstructionCost Cost) {
1416     assert(VF.isVector() && "Expected VF >=2");
1417     // Broadcast this decision to all instructions inside the group.
1418     // But the cost will be assigned to one instruction only.
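    // For example (illustrative only): for a factor-3 group {A, B, C} whose
    // insert position is B, this records (B, VF) -> (W, Cost) and
    // (A, VF) -> (W, 0), (C, VF) -> (W, 0), so that summing the per-instruction
    // costs counts the group's cost exactly once.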
1419     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1420       if (auto *I = Grp->getMember(i)) {
1421         if (Grp->getInsertPos() == I)
1422           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1423         else
1424           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1425       }
1426     }
1427   }
1428
1429   /// Return the cost model decision for the given instruction \p I and vector
1430   /// width \p VF. Return CM_Unknown if this instruction did not pass
1431   /// through the cost modeling.
1432   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1433     assert(VF.isVector() && "Expected VF to be a vector VF");
1434     // Cost model is not run in the VPlan-native path - return conservative
1435     // result until this changes.
1436     if (EnableVPlanNativePath)
1437       return CM_GatherScatter;
1438
1439     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1440     auto Itr = WideningDecisions.find(InstOnVF);
1441     if (Itr == WideningDecisions.end())
1442       return CM_Unknown;
1443     return Itr->second.first;
1444   }
1445
1446   /// Return the vectorization cost for the given instruction \p I and vector
1447   /// width \p VF.
1448   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1449     assert(VF.isVector() && "Expected VF >=2");
1450     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1451     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1452            "The cost is not calculated");
1453     return WideningDecisions[InstOnVF].second;
1454   }
1455
1456   /// Return true if instruction \p I is an optimizable truncate whose operand
1457   /// is an induction variable. Such a truncate will be removed by adding a new
1458   /// induction variable with the destination type.
1459   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1460     // If the instruction is not a truncate, return false.
1461     auto *Trunc = dyn_cast<TruncInst>(I);
1462     if (!Trunc)
1463       return false;
1464
1465     // Get the source and destination types of the truncate.
1466     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1467     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1468
1469     // If the truncate is free for the given types, return false. Replacing a
1470     // free truncate with an induction variable would add an induction variable
1471     // update instruction to each iteration of the loop. We exclude from this
1472     // check the primary induction variable since it will need an update
1473     // instruction regardless.
1474     Value *Op = Trunc->getOperand(0);
1475     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1476       return false;
1477
1478     // If the truncated value is not an induction variable, return false.
1479     return Legal->isInductionPhi(Op);
1480   }
1481
1482   /// Collects the instructions to scalarize for each predicated instruction in
1483   /// the loop.
1484   void collectInstsToScalarize(ElementCount VF);
1485
1486   /// Collect Uniform and Scalar values for the given \p VF.
1487   /// The sets depend on the CM decision for Load/Store instructions
1488   /// that may be vectorized as interleave, gather-scatter or scalarized.
1489   void collectUniformsAndScalars(ElementCount VF) {
1490     // Do the analysis once.
1491     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1492       return;
1493     setCostBasedWideningDecision(VF);
1494     collectLoopUniforms(VF);
1495     collectLoopScalars(VF);
1496   }
1497
1498   /// Returns true if the target machine supports masked store operation
1499   /// for the given \p DataType and kind of access to \p Ptr.
1500   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1501     return Legal->isConsecutivePtr(DataType, Ptr) &&
1502            TTI.isLegalMaskedStore(DataType, Alignment);
1503   }
1504
1505   /// Returns true if the target machine supports masked load operation
1506   /// for the given \p DataType and kind of access to \p Ptr.
1507   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1508     return Legal->isConsecutivePtr(DataType, Ptr) &&
1509            TTI.isLegalMaskedLoad(DataType, Alignment);
1510   }
1511
1512   /// Returns true if the target machine can represent \p V as a masked gather
1513   /// or scatter operation.
1514   bool isLegalGatherOrScatter(Value *V) {
1515     bool LI = isa<LoadInst>(V);
1516     bool SI = isa<StoreInst>(V);
1517     if (!LI && !SI)
1518       return false;
1519     auto *Ty = getLoadStoreType(V);
1520     Align Align = getLoadStoreAlignment(V);
1521     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1522            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1523   }
1524
1525   /// Returns true if the target machine supports all of the reduction
1526   /// variables found for the given VF.
1527   bool canVectorizeReductions(ElementCount VF) const {
1528     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1529       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1530       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1531     }));
1532   }
1533
1534   /// Returns true if \p I is an instruction that will be scalarized with
1535   /// predication. Such instructions include conditional stores and
1536   /// instructions that may divide by zero.
1537   /// If a non-zero VF has been calculated, we check if I will be scalarized
1538   /// with predication for that VF.
1539   bool isScalarWithPredication(Instruction *I) const;
1540
1541   // Returns true if \p I is an instruction that will be predicated either
1542   // through scalar predication or masked load/store or masked gather/scatter.
1543   // Superset of instructions that return true for isScalarWithPredication.
1544   bool isPredicatedInst(Instruction *I) {
1545     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1546       return false;
1547     // Loads and stores that need some form of masked operation are predicated
1548     // instructions.
1549     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1550       return Legal->isMaskRequired(I);
1551     return isScalarWithPredication(I);
1552   }
1553
1554   /// Returns true if \p I is a memory instruction with consecutive memory
1555   /// access that can be widened.
1556   bool
1557   memoryInstructionCanBeWidened(Instruction *I,
1558                                 ElementCount VF = ElementCount::getFixed(1));
1559
1560   /// Returns true if \p I is a memory instruction in an interleaved-group
1561   /// of memory accesses that can be vectorized with wide vector loads/stores
1562   /// and shuffles.
1563   bool
1564   interleavedAccessCanBeWidened(Instruction *I,
1565                                 ElementCount VF = ElementCount::getFixed(1));
1566
1567   /// Check if \p Instr belongs to any interleaved access group.
1568   bool isAccessInterleaved(Instruction *Instr) {
1569     return InterleaveInfo.isInterleaved(Instr);
1570   }
1571
1572   /// Get the interleaved access group that \p Instr belongs to.
1573   const InterleaveGroup<Instruction> *
1574   getInterleavedAccessGroup(Instruction *Instr) {
1575     return InterleaveInfo.getInterleaveGroup(Instr);
1576   }
1577
1578   /// Returns true if we're required to use a scalar epilogue for at least
1579   /// the final iteration of the original loop.
1580   bool requiresScalarEpilogue(ElementCount VF) const {
1581     if (!isScalarEpilogueAllowed())
1582       return false;
1583     // If we might exit from anywhere but the latch, we must run the exiting
1584     // iteration in scalar form.
1585     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1586       return true;
1587     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1588   }
1589
1590   /// Returns true if a scalar epilogue is allowed, i.e. it has not been
1591   /// disallowed due to optsize or a loop hint annotation.
1592   bool isScalarEpilogueAllowed() const {
1593     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1594   }
1595
1596   /// Returns true if all loop blocks should be masked to fold the loop tail.
1597   bool foldTailByMasking() const { return FoldTailByMasking; }
1598
1599   /// Returns true if the instructions in this block require predication
1600   /// for any reason, e.g. because tail folding now requires a predicate
1601   /// or because the block in the original loop was predicated.
1602   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1603     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1604   }
1605
1606   /// A SmallMapVector to store the in-loop reduction op chains, mapping phi
1607   /// nodes to the chain of instructions representing the reductions. Uses a
1608   /// MapVector to ensure deterministic iteration order.
1609   using ReductionChainMap =
1610       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1611
1612   /// Return the chain of instructions representing an in-loop reduction.
1613   const ReductionChainMap &getInLoopReductionChains() const {
1614     return InLoopReductionChains;
1615   }
1616
1617   /// Returns true if the Phi is part of an in-loop reduction.
1618   bool isInLoopReduction(PHINode *Phi) const {
1619     return InLoopReductionChains.count(Phi);
1620   }
1621
1622   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1623   /// with factor VF. Return the cost of the instruction, including
1624   /// scalarization overhead if it's needed.
1625   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1626
1627   /// Estimate cost of a call instruction CI if it were vectorized with factor
1628   /// VF. Return the cost of the instruction, including scalarization overhead
1629   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1630   /// scalarized, i.e. either a vector version isn't available or the vector
1631   /// version is too expensive.
1632   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1633                                     bool &NeedToScalarize) const;
1634
1635   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1636   /// that of B.
1637   bool isMoreProfitable(const VectorizationFactor &A,
1638                         const VectorizationFactor &B) const;
1639
1640   /// Invalidates decisions already taken by the cost model.
1641   void invalidateCostModelingDecisions() {
1642     WideningDecisions.clear();
1643     Uniforms.clear();
1644     Scalars.clear();
1645   }
1646
1647 private:
1648   unsigned NumPredStores = 0;
1649
1650   /// \return An upper bound for the vectorization factors for both
1651   /// fixed and scalable vectorization, where the minimum-known number of
1652   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1653   /// disabled or unsupported, then the scalable part will be equal to
1654   /// ElementCount::getScalable(0).
1655   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1656                                            ElementCount UserVF);
1657
1658   /// \return the maximized element count based on the target's vector
1659   /// registers and the loop trip-count, but limited to a maximum safe VF.
1660   /// This is a helper function of computeFeasibleMaxVF.
1661   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1662   /// issue that occurred on one of the buildbots which cannot be reproduced
1663   /// without having access to the proprietary compiler (see comments on
1664   /// D98509). The issue is currently under investigation and this workaround
1665   /// will be removed as soon as possible.
1666   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1667                                        unsigned SmallestType,
1668                                        unsigned WidestType,
1669                                        const ElementCount &MaxSafeVF);
1670
1671   /// \return the maximum legal scalable VF, based on the safe max number
1672   /// of elements.
1673   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1674
1675   /// The vectorization cost is a combination of the cost itself and a boolean
1676   /// indicating whether any of the contributing operations will actually
1677   /// operate on vector values after type legalization in the backend. If this
1678   /// latter value is false, then all operations will be scalarized (i.e. no
1679   /// vectorization has actually taken place).
1680   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1681
1682   /// Returns the expected execution cost. The unit of the cost does
1683   /// not matter because we use the 'cost' units to compare different
1684   /// vector widths. The cost that is returned is *not* normalized by
1685   /// the factor width. If \p Invalid is not nullptr, this function
1686   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1687   /// each instruction that has an Invalid cost for the given VF.
1688   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1689   VectorizationCostTy
1690   expectedCost(ElementCount VF,
1691                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1692
1693   /// Returns the execution time cost of an instruction for a given vector
1694   /// width. Vector width of one means scalar.
1695   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1696
1697   /// The cost-computation logic from getInstructionCost which provides
1698   /// the vector type as an output parameter.
1699   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1700                                      Type *&VectorTy);
1701
1702   /// Return the cost of instructions in an in-loop reduction pattern, if I is
1703   /// part of that pattern.
1704   Optional<InstructionCost>
1705   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1706                           TTI::TargetCostKind CostKind);
1707
1708   /// Calculate vectorization cost of memory instruction \p I.
1709   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1710
1711   /// The cost computation for scalarized memory instruction.
1712   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1713
1714   /// The cost computation for interleaving group of memory instructions.
1715   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1716
1717   /// The cost computation for Gather/Scatter instruction.
1718   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1719
1720   /// The cost computation for widening instruction \p I with consecutive
1721   /// memory access.
1722   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1723
1724   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1725   /// Load: scalar load + broadcast.
1726   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1727   /// element)
1728   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1729
1730   /// Estimate the overhead of scalarizing an instruction. This is a
1731   /// convenience wrapper for the type-based getScalarizationOverhead API.
1732   InstructionCost getScalarizationOverhead(Instruction *I,
1733                                            ElementCount VF) const;
1734
1735   /// Returns whether the instruction is a load or store and will be emitted
1736   /// as a vector operation.
1737   bool isConsecutiveLoadOrStore(Instruction *I);
1738
1739   /// Returns true if an artificially high cost for emulated masked memrefs
1740   /// should be used.
1741   bool useEmulatedMaskMemRefHack(Instruction *I);
1742
1743   /// Map of scalar integer values to the smallest bitwidth they can be legally
1744   /// represented as. The vector equivalents of these values should be truncated
1745   /// to this type.
1746   MapVector<Instruction *, uint64_t> MinBWs;
1747
1748   /// A type representing the costs for instructions if they were to be
1749   /// scalarized rather than vectorized. The entries are Instruction-Cost
1750   /// pairs.
1751   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1752
1753   /// A set containing all BasicBlocks that are known to be present after
1754   /// vectorization as predicated blocks.
1755   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1756
1757   /// Records whether it is allowed to have the original scalar loop execute at
1758   /// least once. This may be needed as a fallback loop in case runtime
1759   /// aliasing/dependence checks fail, or to handle the tail/remainder
1760   /// iterations when the trip count is unknown or not a multiple of the VF,
1761   /// or as a peel-loop to handle gaps in interleave-groups.
1762   /// Under optsize and when the trip count is very small we don't allow any
1763   /// iterations to execute in the scalar loop.
1764   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1765
1766   /// All blocks of the loop are to be masked to fold the tail of the scalar
1767   /// iterations.
1767   bool FoldTailByMasking = false;
1768
1769   /// A map holding scalar costs for different vectorization factors. The
1770   /// presence of a cost for an instruction in the mapping indicates that the
1771   /// instruction will be scalarized when vectorizing with the associated
1772   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1773   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1774
1775   /// Holds the instructions known to be uniform after vectorization.
1776   /// The data is collected per VF.
1777   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1778
1779   /// Holds the instructions known to be scalar after vectorization.
1780   /// The data is collected per VF.
1781   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1782
1783   /// Holds the instructions (address computations) that are forced to be
1784   /// scalarized.
1785   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1786
1787   /// PHINodes of the reductions that should be expanded in-loop along with
1788   /// their associated chains of reduction operations, in program order from top
1789   /// (PHI) to bottom.
1790   ReductionChainMap InLoopReductionChains;
1791
1792   /// A map of in-loop reduction operations and their immediate chain operand.
1793   /// FIXME: This can be removed once reductions can be costed correctly in
1794   /// vplan. This was added to allow quick lookup of the in-loop operations,
1795   /// without having to loop through InLoopReductionChains.
1796   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1797
1798   /// Returns the expected difference in cost from scalarizing the expression
1799   /// feeding a predicated instruction \p PredInst. The instructions to
1800   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1801   /// non-negative return value implies the expression will be scalarized.
1802   /// Currently, only single-use chains are considered for scalarization.
1803   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1804                               ElementCount VF);
1805
1806   /// Collect the instructions that are uniform after vectorization. An
1807   /// instruction is uniform if we represent it with a single scalar value in
1808   /// the vectorized loop corresponding to each vector iteration. Examples of
1809   /// uniform instructions include pointer operands of consecutive or
1810   /// interleaved memory accesses. Note that although uniformity implies an
1811   /// instruction will be scalar, the reverse is not true. In general, a
1812   /// scalarized instruction will be represented by VF scalar values in the
1813   /// vectorized loop, each corresponding to an iteration of the original
1814   /// scalar loop.
1815   void collectLoopUniforms(ElementCount VF);
1816
1817   /// Collect the instructions that are scalar after vectorization. An
1818   /// instruction is scalar if it is known to be uniform or will be scalarized
1819   /// during vectorization. Non-uniform scalarized instructions will be
1820   /// represented by VF values in the vectorized loop, each corresponding to an
1821   /// iteration of the original scalar loop.
1822   void collectLoopScalars(ElementCount VF);
1823
1824   /// Keeps cost model vectorization decision and cost for instructions.
1825   /// Right now it is used for memory instructions only.
1826   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1827                                 std::pair<InstWidening, InstructionCost>>;
1828
1829   DecisionList WideningDecisions;
1830
1831   /// Returns true if \p V is expected to be vectorized and it needs to be
1832   /// extracted.
1833   bool needsExtract(Value *V, ElementCount VF) const {
1834     Instruction *I = dyn_cast<Instruction>(V);
1835     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1836         TheLoop->isLoopInvariant(I))
1837       return false;
1838
1839     // Assume we can vectorize V (and hence we need extraction) if the
1840     // scalars are not computed yet. This can happen, because it is called
1841     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1842     // the scalars are collected. That should be a safe assumption in most
1843     // cases, because we check if the operands have vectorizable types
1844     // beforehand in LoopVectorizationLegality.
1845     return Scalars.find(VF) == Scalars.end() ||
1846            !isScalarAfterVectorization(I, VF);
1847   }
1848
1849   /// Returns a range containing only operands needing to be extracted.
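  /// For example (illustrative only): when scalarizing
  ///   %sum = add i32 %inv, %wide
  /// where %inv is defined outside the loop and %wide is expected to be
  /// vectorized, only %wide is kept: loop-invariant operands never need an
  /// extractelement per scalarized lane, while vectorized in-loop operands do.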
1850   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1851                                                    ElementCount VF) const {
1852     return SmallVector<Value *, 4>(make_filter_range(
1853         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1854   }
1855
1856   /// Determines if we have the infrastructure to vectorize loop \p L and its
1857   /// epilogue, assuming the main loop is vectorized by \p VF.
1858   bool isCandidateForEpilogueVectorization(const Loop &L,
1859                                            const ElementCount VF) const;
1860
1861   /// Returns true if epilogue vectorization is considered profitable, and
1862   /// false otherwise.
1863   /// \p VF is the vectorization factor chosen for the original loop.
1864   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1865
1866 public:
1867   /// The loop that we evaluate.
1868   Loop *TheLoop;
1869
1870   /// Predicated scalar evolution analysis.
1871   PredicatedScalarEvolution &PSE;
1872
1873   /// Loop Info analysis.
1874   LoopInfo *LI;
1875
1876   /// Vectorization legality.
1877   LoopVectorizationLegality *Legal;
1878
1879   /// Vector target information.
1880   const TargetTransformInfo &TTI;
1881
1882   /// Target Library Info.
1883   const TargetLibraryInfo *TLI;
1884
1885   /// Demanded bits analysis.
1886   DemandedBits *DB;
1887
1888   /// Assumption cache.
1889   AssumptionCache *AC;
1890
1891   /// Interface to emit optimization remarks.
1892   OptimizationRemarkEmitter *ORE;
1893
1894   const Function *TheFunction;
1895
1896   /// Loop Vectorize Hint.
1897   const LoopVectorizeHints *Hints;
1898
1899   /// The interleave access information contains groups of interleaved accesses
1900   /// with the same stride that are close to each other.
1901   InterleavedAccessInfo &InterleaveInfo;
1902
1903   /// Values to ignore in the cost model.
1904   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1905
1906   /// Values to ignore in the cost model when VF > 1.
1907   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1908
1909   /// All element types found in the loop.
1910   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1911
1912   /// Profitable vector factors.
1913   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1914 };
1915 } // end namespace llvm
1916
1917 /// Helper struct to manage generating runtime checks for vectorization.
1918 ///
1919 /// The runtime checks are created up-front in temporary blocks to allow better
1920 /// cost estimation, and are un-linked from the existing IR. After deciding to
1921 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1922 /// temporary blocks are completely removed.
1923 class GeneratedRTChecks {
1924   /// Basic block which contains the generated SCEV checks, if any.
1925   BasicBlock *SCEVCheckBlock = nullptr;
1926
1927   /// The value representing the result of the generated SCEV checks. If it is
1928   /// nullptr, either no SCEV checks have been generated or they have been used.
1929   Value *SCEVCheckCond = nullptr;
1930
1931   /// Basic block which contains the generated memory runtime checks, if any.
1932   BasicBlock *MemCheckBlock = nullptr;
1933
1934   /// The value representing the result of the generated memory runtime checks.
1935   /// If it is nullptr, either no memory runtime checks have been generated or
1936   /// they have been used.
1937   Value *MemRuntimeCheckCond = nullptr;
1938
1939   DominatorTree *DT;
1940   LoopInfo *LI;
1941
1942   SCEVExpander SCEVExp;
1943   SCEVExpander MemCheckExp;
1944
1945 public:
1946   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1947                     const DataLayout &DL)
1948       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
1949         MemCheckExp(SE, DL, "scev.check") {}
1950
1951   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1952   /// accurately estimate the cost of the runtime checks. The blocks are
1953   /// un-linked from the IR and are added back during vector code generation. If
1954   /// there is no vector code generation, the check blocks are removed
1955   /// completely.
1956   void Create(Loop *L, const LoopAccessInfo &LAI,
1957               const SCEVUnionPredicate &UnionPred) {
1958
1959     BasicBlock *LoopHeader = L->getHeader();
1960     BasicBlock *Preheader = L->getLoopPreheader();
1961
1962     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1963     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1964     // may be used by SCEVExpander. The blocks will be un-linked from their
1965     // predecessors and removed from LI & DT at the end of the function.
1966     if (!UnionPred.isAlwaysTrue()) {
1967       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1968                                   nullptr, "vector.scevcheck");
1969
1970       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1971           &UnionPred, SCEVCheckBlock->getTerminator());
1972     }
1973
1974     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1975     if (RtPtrChecking.Need) {
1976       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1977       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1978                                  "vector.memcheck");
1979
1980       MemRuntimeCheckCond =
1981           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1982                            RtPtrChecking.getChecks(), MemCheckExp);
1983       assert(MemRuntimeCheckCond &&
1984              "no RT checks generated although RtPtrChecking "
1985              "claimed checks are required");
1986     }
1987
1988     if (!MemCheckBlock && !SCEVCheckBlock)
1989       return;
1990
1991     // Unhook the temporary blocks with the checks and update various places
1992     // accordingly.
1993     if (SCEVCheckBlock)
1994       SCEVCheckBlock->replaceAllUsesWith(Preheader);
1995     if (MemCheckBlock)
1996       MemCheckBlock->replaceAllUsesWith(Preheader);
1997
1998     if (SCEVCheckBlock) {
1999       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2000       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2001       Preheader->getTerminator()->eraseFromParent();
2002     }
2003     if (MemCheckBlock) {
2004       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2005       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2006       Preheader->getTerminator()->eraseFromParent();
2007     }
2008
2009     DT->changeImmediateDominator(LoopHeader, Preheader);
2010     if (MemCheckBlock) {
2011       DT->eraseNode(MemCheckBlock);
2012       LI->removeBlock(MemCheckBlock);
2013     }
2014     if (SCEVCheckBlock) {
2015       DT->eraseNode(SCEVCheckBlock);
2016       LI->removeBlock(SCEVCheckBlock);
2017     }
2018   }
2019
2020   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2021   /// unused.
2022   ~GeneratedRTChecks() {
2023     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2024     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2025     if (!SCEVCheckCond)
2026       SCEVCleaner.markResultUsed();
2027
2028     if (!MemRuntimeCheckCond)
2029       MemCheckCleaner.markResultUsed();
2030
2031     if (MemRuntimeCheckCond) {
2032       auto &SE = *MemCheckExp.getSE();
2033       // Memory runtime check generation creates compares that use expanded
2034       // values. Remove them before running the SCEVExpanderCleaners.
2035       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2036         if (MemCheckExp.isInsertedInstruction(&I))
2037           continue;
2038         SE.forgetValue(&I);
2039         I.eraseFromParent();
2040       }
2041     }
2042     MemCheckCleaner.cleanup();
2043     SCEVCleaner.cleanup();
2044
2045     if (SCEVCheckCond)
2046       SCEVCheckBlock->eraseFromParent();
2047     if (MemRuntimeCheckCond)
2048       MemCheckBlock->eraseFromParent();
2049   }
2050
2051   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2052   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2053   /// depending on the generated condition.
2054   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2055                              BasicBlock *LoopVectorPreHeader,
2056                              BasicBlock *LoopExitBlock) {
2057     if (!SCEVCheckCond)
2058       return nullptr;
2059     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2060       if (C->isZero())
2061         return nullptr;
2062
2063     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2064
2065     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2066     // Create new preheader for vector loop.
2067     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2068       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2069
2070     SCEVCheckBlock->getTerminator()->eraseFromParent();
2071     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2072     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2073                                                 SCEVCheckBlock);
2074
2075     DT->addNewBlock(SCEVCheckBlock, Pred);
2076     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2077
2078     ReplaceInstWithInst(
2079         SCEVCheckBlock->getTerminator(),
2080         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2081     // Mark the check as used, to prevent it from being removed during cleanup.
2082     SCEVCheckCond = nullptr;
2083     return SCEVCheckBlock;
2084   }
2085
2086   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2087   /// the branches to branch to the vector preheader or \p Bypass, depending on
2088   /// the generated condition.
2089   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2090                                    BasicBlock *LoopVectorPreHeader) {
2091     // Check if we generated code that checks at runtime whether arrays overlap.
2092     if (!MemRuntimeCheckCond)
2093       return nullptr;
2094
2095     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2096     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2097                                                 MemCheckBlock);
2098
2099     DT->addNewBlock(MemCheckBlock, Pred);
2100     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2101     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2102
2103     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2104       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2105
2106     ReplaceInstWithInst(
2107         MemCheckBlock->getTerminator(),
2108         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2109     MemCheckBlock->getTerminator()->setDebugLoc(
2110         Pred->getTerminator()->getDebugLoc());
2111
2112     // Mark the check as used, to prevent it from being removed during cleanup.
2113     MemRuntimeCheckCond = nullptr;
2114     return MemCheckBlock;
2115   }
2116 };
2117
2118 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2119 // vectorization. The loop needs to be annotated with #pragma omp simd
2120 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If
2121 // the vector length information is not provided, vectorization is not
2122 // considered explicit. Interleave hints are not allowed either. These
2123 // limitations will be relaxed in the future.
2124 // Please note that we are currently forced to abuse the pragma 'clang loop
2125 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2126 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2127 // provides *explicit vectorization hints* (LV can bypass legal checks and
2128 // assume that vectorization is legal). However, both hints are implemented
2129 // using the same metadata (llvm.loop.vectorize, processed by
2130 // LoopVectorizeHints). This will be fixed in the future when the native IR
2131 // representation for pragma 'omp simd' is introduced.
2132 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2133                                    OptimizationRemarkEmitter *ORE) {
2134   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2135   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2136
2137   // Only outer loops with an explicit vectorization hint are supported.
2138   // Unannotated outer loops are ignored.
2139   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2140     return false;
2141
2142   Function *Fn = OuterLp->getHeader()->getParent();
2143   if (!Hints.allowVectorization(Fn, OuterLp,
2144                                 true /*VectorizeOnlyWhenForced*/)) {
2145     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2146     return false;
2147   }
2148
2149   if (Hints.getInterleave() > 1) {
2150     // TODO: Interleave support is future work.
2151     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2152                          "outer loops.\n");
2153     Hints.emitRemarkWithHints();
2154     return false;
2155   }
2156
2157   return true;
2158 }
2159
2160 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2161                                   OptimizationRemarkEmitter *ORE,
2162                                   SmallVectorImpl<Loop *> &V) {
2163   // Collect inner loops and outer loops without irreducible control flow. For
2164   // now, only collect outer loops that have explicit vectorization hints. If we
2165   // are stress testing the VPlan H-CFG construction, we collect the outermost
2166   // loop of every loop nest.
2167   if (L.isInnermost() || VPlanBuildStressTest ||
2168       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2169     LoopBlocksRPO RPOT(&L);
2170     RPOT.perform(LI);
2171     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2172       V.push_back(&L);
2173       // TODO: Collect inner loops inside marked outer loops in case
2174       // vectorization fails for the outer loop. Do not invoke
2175       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2176       // already known to be reducible. We can use an inherited attribute for
2177       // that.
2178       return;
2179     }
2180   }
2181   for (Loop *InnerL : L)
2182     collectSupportedLoops(*InnerL, LI, ORE, V);
2183 }
2184
2185 namespace {
2186
2187 /// The LoopVectorize Pass.
2188 struct LoopVectorize : public FunctionPass {
2189   /// Pass identification, replacement for typeid
2190   static char ID;
2191
2192   LoopVectorizePass Impl;
2193
2194   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2195                          bool VectorizeOnlyWhenForced = false)
2196       : FunctionPass(ID),
2197         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2198     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2199   }
2200
2201   bool runOnFunction(Function &F) override {
2202     if (skipFunction(F))
2203       return false;
2204
2205     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2206     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2207     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2208     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2209     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2210     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2211     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2212     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2213     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2214     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2215     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2216     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2217     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2218
2219     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2220         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2221
2222     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2223                         GetLAA, *ORE, PSI).MadeAnyChange;
2224   }
2225
2226   void getAnalysisUsage(AnalysisUsage &AU) const override {
2227     AU.addRequired<AssumptionCacheTracker>();
2228     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2229     AU.addRequired<DominatorTreeWrapperPass>();
2230     AU.addRequired<LoopInfoWrapperPass>();
2231     AU.addRequired<ScalarEvolutionWrapperPass>();
2232     AU.addRequired<TargetTransformInfoWrapperPass>();
2233     AU.addRequired<AAResultsWrapperPass>();
2234     AU.addRequired<LoopAccessLegacyAnalysis>();
2235     AU.addRequired<DemandedBitsWrapperPass>();
2236     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2237     AU.addRequired<InjectTLIMappingsLegacy>();
2238
2239     // We currently do not preserve LoopInfo/dominator analyses with outer loop
2240     // vectorization. Until this is addressed, mark these analyses as preserved
2241     // only for non-VPlan-native path.
2242     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2243     if (!EnableVPlanNativePath) {
2244       AU.addPreserved<LoopInfoWrapperPass>();
2245       AU.addPreserved<DominatorTreeWrapperPass>();
2246     }
2247
2248     AU.addPreserved<BasicAAWrapperPass>();
2249     AU.addPreserved<GlobalsAAWrapperPass>();
2250     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2251   }
2252 };
2253
2254 } // end anonymous namespace
2255
2256 //===----------------------------------------------------------------------===//
2257 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer,
2258 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2259 //===----------------------------------------------------------------------===//
2260
2261 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2262   // We need to place the broadcast of invariant variables outside the loop,
2263   // but only if it's proven safe to do so. Otherwise, the broadcast will be
2264   // inside the vector loop body.
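  // A minimal sketch of the IR this emits (illustrative, assuming VF = 4 and
  // an i32 value %x); CreateVectorSplat expands to an insertelement followed
  // by a zero-mask shufflevector:
  //   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
  //                                    <4 x i32> poison, <4 x i32> zeroinitializer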
2265 Instruction *Instr = dyn_cast<Instruction>(V); 2266 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2267 (!Instr || 2268 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2269 // Place the code for broadcasting invariant variables in the new preheader. 2270 IRBuilder<>::InsertPointGuard Guard(Builder); 2271 if (SafeToHoist) 2272 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2273 2274 // Broadcast the scalar into all locations in the vector. 2275 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2276 2277 return Shuf; 2278 } 2279 2280 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI( 2281 const InductionDescriptor &II, Value *Step, Value *Start, 2282 Instruction *EntryVal, VPValue *Def, VPValue *CastDef, 2283 VPTransformState &State) { 2284 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 2285 "Expected either an induction phi-node or a truncate of it!"); 2286 2287 // Construct the initial value of the vector IV in the vector loop preheader 2288 auto CurrIP = Builder.saveIP(); 2289 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2290 if (isa<TruncInst>(EntryVal)) { 2291 assert(Start->getType()->isIntegerTy() && 2292 "Truncation requires an integer type"); 2293 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 2294 Step = Builder.CreateTrunc(Step, TruncType); 2295 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 2296 } 2297 2298 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 2299 Value *SplatStart = Builder.CreateVectorSplat(VF, Start); 2300 Value *SteppedStart = 2301 getStepVector(SplatStart, Zero, Step, II.getInductionOpcode()); 2302 2303 // We create vector phi nodes for both integer and floating-point induction 2304 // variables. Here, we determine the kind of arithmetic we will perform. 2305 Instruction::BinaryOps AddOp; 2306 Instruction::BinaryOps MulOp; 2307 if (Step->getType()->isIntegerTy()) { 2308 AddOp = Instruction::Add; 2309 MulOp = Instruction::Mul; 2310 } else { 2311 AddOp = II.getInductionOpcode(); 2312 MulOp = Instruction::FMul; 2313 } 2314 2315 // Multiply the vectorization factor by the step using integer or 2316 // floating-point arithmetic as appropriate. 2317 Type *StepType = Step->getType(); 2318 Value *RuntimeVF; 2319 if (Step->getType()->isFloatingPointTy()) 2320 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF); 2321 else 2322 RuntimeVF = getRuntimeVF(Builder, StepType, VF); 2323 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 2324 2325 // Create a vector splat to use in the induction update. 2326 // 2327 // FIXME: If the step is non-constant, we create the vector splat with 2328 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 2329 // handle a constant vector splat. 2330 Value *SplatVF = isa<Constant>(Mul) 2331 ? ConstantVector::getSplat(VF, cast<Constant>(Mul)) 2332 : Builder.CreateVectorSplat(VF, Mul); 2333 Builder.restoreIP(CurrIP); 2334 2335 // We may need to add the step a number of times, depending on the unroll 2336 // factor. The last of those goes into the PHI. 
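  // Worked example (illustrative only) with UF = 2, where %splat.vf denotes
  // the SplatVF splat of (RuntimeVF * Step) computed above:
  //   part 0: %vec.ind                                  (the PHI)
  //   part 1: %step.add     = %vec.ind  + %splat.vf
  //   latch:  %vec.ind.next = %step.add + %splat.vf     ; feeds back into PHI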
2337   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2338                                     &*LoopVectorBody->getFirstInsertionPt());
2339   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2340   Instruction *LastInduction = VecInd;
2341   for (unsigned Part = 0; Part < UF; ++Part) {
2342     State.set(Def, LastInduction, Part);
2343
2344     if (isa<TruncInst>(EntryVal))
2345       addMetadata(LastInduction, EntryVal);
2346     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2347                                           State, Part);
2348
2349     LastInduction = cast<Instruction>(
2350         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2351     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2352   }
2353
2354   // Move the last step to the end of the latch block. This ensures consistent
2355   // placement of all induction updates.
2356   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2357   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2358   auto *ICmp = cast<Instruction>(Br->getCondition());
2359   LastInduction->moveBefore(ICmp);
2360   LastInduction->setName("vec.ind.next");
2361
2362   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2363   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2364 }
2365
2366 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2367   return Cost->isScalarAfterVectorization(I, VF) ||
2368          Cost->isProfitableToScalarize(I, VF);
2369 }
2370
2371 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2372   if (shouldScalarizeInstruction(IV))
2373     return true;
2374   auto isScalarInst = [&](User *U) -> bool {
2375     auto *I = cast<Instruction>(U);
2376     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2377   };
2378   return llvm::any_of(IV->users(), isScalarInst);
2379 }
2380
2381 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2382     const InductionDescriptor &ID, const Instruction *EntryVal,
2383     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2384     unsigned Part, unsigned Lane) {
2385   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2386          "Expected either an induction phi-node or a truncate of it!");
2387
2388   // This induction variable is not the phi from the original loop but the
2389   // newly-created IV based on the proof that the casted Phi is equal to the
2390   // uncasted Phi in the vectorized loop (possibly under a runtime guard). It
2391   // re-uses the same InductionDescriptor that the original IV uses, but we
2392   // don't have to do any recording in this case - that is done when the
2393   // original IV is processed.
2394   if (isa<TruncInst>(EntryVal))
2395     return;
2396
2397   if (!CastDef) {
2398     assert(ID.getCastInsts().empty() &&
2399            "there are casts for ID, but no CastDef");
2400     return;
2401   }
2402   assert(!ID.getCastInsts().empty() &&
2403          "there is a CastDef, but no casts for ID");
2404   // Only the first Cast instruction in the Casts vector is of interest.
2405   // The rest of the Casts (if they exist) have no uses outside the
2406   // induction update chain itself.
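  // Note (an assumption documented for clarity): Lane defaults to UINT_MAX in
  // the declaration, meaning "no specific lane", in which case the value is
  // recorded once per unroll part; otherwise the scalar value is recorded for
  // the exact (Part, Lane) iteration.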
2407 if (Lane < UINT_MAX) 2408 State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane)); 2409 else 2410 State.set(CastDef, VectorLoopVal, Part); 2411 } 2412 2413 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start, 2414 TruncInst *Trunc, VPValue *Def, 2415 VPValue *CastDef, 2416 VPTransformState &State) { 2417 assert((IV->getType()->isIntegerTy() || IV != OldInduction) && 2418 "Primary induction variable must have an integer type"); 2419 2420 auto II = Legal->getInductionVars().find(IV); 2421 assert(II != Legal->getInductionVars().end() && "IV is not an induction"); 2422 2423 auto ID = II->second; 2424 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 2425 2426 // The value from the original loop to which we are mapping the new induction 2427 // variable. 2428 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 2429 2430 auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 2431 2432 // Generate code for the induction step. Note that induction steps are 2433 // required to be loop-invariant 2434 auto CreateStepValue = [&](const SCEV *Step) -> Value * { 2435 assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) && 2436 "Induction step should be loop invariant"); 2437 if (PSE.getSE()->isSCEVable(IV->getType())) { 2438 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 2439 return Exp.expandCodeFor(Step, Step->getType(), 2440 LoopVectorPreHeader->getTerminator()); 2441 } 2442 return cast<SCEVUnknown>(Step)->getValue(); 2443 }; 2444 2445 // The scalar value to broadcast. This is derived from the canonical 2446 // induction variable. If a truncation type is given, truncate the canonical 2447 // induction variable and step. Otherwise, derive these values from the 2448 // induction descriptor. 2449 auto CreateScalarIV = [&](Value *&Step) -> Value * { 2450 Value *ScalarIV = Induction; 2451 if (IV != OldInduction) { 2452 ScalarIV = IV->getType()->isIntegerTy() 2453 ? Builder.CreateSExtOrTrunc(Induction, IV->getType()) 2454 : Builder.CreateCast(Instruction::SIToFP, Induction, 2455 IV->getType()); 2456 ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID); 2457 ScalarIV->setName("offset.idx"); 2458 } 2459 if (Trunc) { 2460 auto *TruncType = cast<IntegerType>(Trunc->getType()); 2461 assert(Step->getType()->isIntegerTy() && 2462 "Truncation requires an integer step"); 2463 ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType); 2464 Step = Builder.CreateTrunc(Step, TruncType); 2465 } 2466 return ScalarIV; 2467 }; 2468 2469 // Create the vector values from the scalar IV, in the absence of creating a 2470 // vector IV. 2471 auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) { 2472 Value *Broadcasted = getBroadcastInstrs(ScalarIV); 2473 for (unsigned Part = 0; Part < UF; ++Part) { 2474 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2475 Value *StartIdx; 2476 if (Step->getType()->isFloatingPointTy()) 2477 StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part); 2478 else 2479 StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part); 2480 2481 Value *EntryPart = 2482 getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode()); 2483 State.set(Def, EntryPart, Part); 2484 if (Trunc) 2485 addMetadata(EntryPart, Trunc); 2486 recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef, 2487 State, Part); 2488 } 2489 }; 2490 2491 // Fast-math-flags propagate from the original induction instruction. 
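  // For example (illustrative only): if the original induction update is
  //   %iv.next = fadd fast float %iv, %step
  // the 'fast' flags are copied onto the widened FMul/FAdd operations built
  // below, and the previous flags are restored when FMFG goes out of scope.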
2492   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2493   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2494     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2495
2496   // Now do the actual transformations, and start with creating the step value.
2497   Value *Step = CreateStepValue(ID.getStep());
2498   if (VF.isZero() || VF.isScalar()) {
2499     Value *ScalarIV = CreateScalarIV(Step);
2500     CreateSplatIV(ScalarIV, Step);
2501     return;
2502   }
2503
2504   // Determine if we want a scalar version of the induction variable. This is
2505   // true if the induction variable itself is not widened, or if it has at
2506   // least one user in the loop that is not widened.
2507   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2508   if (!NeedsScalarIV) {
2509     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2510                                     State);
2511     return;
2512   }
2513
2514   // Try to create a new independent vector induction variable. If we can't
2515   // create the phi node, we will splat the scalar induction variable in each
2516   // loop iteration.
2517   if (!shouldScalarizeInstruction(EntryVal)) {
2518     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2519                                     State);
2520     Value *ScalarIV = CreateScalarIV(Step);
2521     // Create scalar steps that can be used by instructions we will later
2522     // scalarize. Note that the addition of the scalar steps will not increase
2523     // the number of instructions in the loop in the common case prior to
2524     // InstCombine. We will be trading one vector extract for each scalar step.
2525     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2526     return;
2527   }
2528
2529   // All IV users are scalar instructions, so only emit a scalar IV, not a
2530   // vectorized IV, except when we tail-fold: then the splat IV feeds the
2531   // predicate used by the masked loads/stores.
2532   Value *ScalarIV = CreateScalarIV(Step);
2533   if (!Cost->isScalarEpilogueAllowed())
2534     CreateSplatIV(ScalarIV, Step);
2535   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2536 }
2537
2538 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx,
2539                                           Value *Step,
2540                                           Instruction::BinaryOps BinOp) {
2541   // Create and check the types.
2542   auto *ValVTy = cast<VectorType>(Val->getType());
2543   ElementCount VLen = ValVTy->getElementCount();
2544
2545   Type *STy = Val->getType()->getScalarType();
2546   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2547          "Induction Step must be an integer or FP");
2548   assert(Step->getType() == STy && "Step has wrong type");
2549
2550   SmallVector<Constant *, 8> Indices;
2551
2552   // Create a vector of consecutive numbers from zero to VF.
2553   VectorType *InitVecValVTy = ValVTy;
2554   Type *InitVecValSTy = STy;
2555   if (STy->isFloatingPointTy()) {
2556     InitVecValSTy =
2557         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2558     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2559   }
2560   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2561
2562   // Splat the StartIdx.
2563   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2564
2565   if (STy->isIntegerTy()) {
2566     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2567     Step = Builder.CreateVectorSplat(VLen, Step);
2568     assert(Step->getType() == Val->getType() && "Invalid step vec");
2569     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2570     // which can be found from the original scalar operations.
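    // Worked example (illustrative only) for VF = 4, StartIdx = 0, Step = 2,
    // and Val = splat(%iv), following the CreateMul/CreateAdd below:
    //   InitVec      = <0, 1, 2, 3> + splat(0) = <0, 1, 2, 3>
    //   Step         = <0, 1, 2, 3> * splat(2) = <0, 2, 4, 6>
    //   %induction   = splat(%iv)   + <0, 2, 4, 6>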
2571 Step = Builder.CreateMul(InitVec, Step); 2572 return Builder.CreateAdd(Val, Step, "induction"); 2573 } 2574 2575 // Floating point induction. 2576 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2577 "Binary Opcode should be specified for FP induction"); 2578 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2579 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2580 2581 Step = Builder.CreateVectorSplat(VLen, Step); 2582 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2583 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2584 } 2585 2586 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step, 2587 Instruction *EntryVal, 2588 const InductionDescriptor &ID, 2589 VPValue *Def, VPValue *CastDef, 2590 VPTransformState &State) { 2591 // We shouldn't have to build scalar steps if we aren't vectorizing. 2592 assert(VF.isVector() && "VF should be greater than one"); 2593 // Get the value type and ensure it and the step have the same integer type. 2594 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2595 assert(ScalarIVTy == Step->getType() && 2596 "Val and Step should have the same type"); 2597 2598 // We build scalar steps for both integer and floating-point induction 2599 // variables. Here, we determine the kind of arithmetic we will perform. 2600 Instruction::BinaryOps AddOp; 2601 Instruction::BinaryOps MulOp; 2602 if (ScalarIVTy->isIntegerTy()) { 2603 AddOp = Instruction::Add; 2604 MulOp = Instruction::Mul; 2605 } else { 2606 AddOp = ID.getInductionOpcode(); 2607 MulOp = Instruction::FMul; 2608 } 2609 2610 // Determine the number of scalars we need to generate for each unroll 2611 // iteration. If EntryVal is uniform, we only need to generate the first 2612 // lane. Otherwise, we generate all VF values. 2613 bool IsUniform = 2614 Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF); 2615 unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue(); 2616 // Compute the scalar steps and save the results in State. 2617 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2618 ScalarIVTy->getScalarSizeInBits()); 2619 Type *VecIVTy = nullptr; 2620 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2621 if (!IsUniform && VF.isScalable()) { 2622 VecIVTy = VectorType::get(ScalarIVTy, VF); 2623 UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF)); 2624 SplatStep = Builder.CreateVectorSplat(VF, Step); 2625 SplatIV = Builder.CreateVectorSplat(VF, ScalarIV); 2626 } 2627 2628 for (unsigned Part = 0; Part < UF; ++Part) { 2629 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part); 2630 2631 if (!IsUniform && VF.isScalable()) { 2632 auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0); 2633 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2634 if (ScalarIVTy->isFloatingPointTy()) 2635 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2636 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2637 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2638 State.set(Def, Add, Part); 2639 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2640 Part); 2641 // It's useful to record the lane values too for the known minimum number 2642 // of elements so we do those below. This improves the code quality when 2643 // trying to extract the first element, for example. 
2644 } 2645 2646 if (ScalarIVTy->isFloatingPointTy()) 2647 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2648 2649 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2650 Value *StartIdx = Builder.CreateBinOp( 2651 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2652 // The step returned by `createStepForVF` is a runtime-evaluated value 2653 // when VF is scalable. Otherwise, it should be folded into a Constant. 2654 assert((VF.isScalable() || isa<Constant>(StartIdx)) && 2655 "Expected StartIdx to be folded to a constant when VF is not " 2656 "scalable"); 2657 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2658 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2659 State.set(Def, Add, VPIteration(Part, Lane)); 2660 recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State, 2661 Part, Lane); 2662 } 2663 } 2664 } 2665 2666 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2667 const VPIteration &Instance, 2668 VPTransformState &State) { 2669 Value *ScalarInst = State.get(Def, Instance); 2670 Value *VectorValue = State.get(Def, Instance.Part); 2671 VectorValue = Builder.CreateInsertElement( 2672 VectorValue, ScalarInst, 2673 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2674 State.set(Def, VectorValue, Instance.Part); 2675 } 2676 2677 Value *InnerLoopVectorizer::reverseVector(Value *Vec) { 2678 assert(Vec->getType()->isVectorTy() && "Invalid type"); 2679 return Builder.CreateVectorReverse(Vec, "reverse"); 2680 } 2681 2682 // Return whether we allow using masked interleave-groups (for dealing with 2683 // strided loads/stores that reside in predicated blocks, or for dealing 2684 // with gaps). 2685 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2686 // If an override option has been passed in for interleaved accesses, use it. 2687 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2688 return EnableMaskedInterleavedMemAccesses; 2689 2690 return TTI.enableMaskedInterleavedAccessVectorization(); 2691 } 2692 2693 // Try to vectorize the interleave group that \p Instr belongs to. 2694 // 2695 // E.g. Translate following interleaved load group (factor = 3): 2696 // for (i = 0; i < N; i+=3) { 2697 // R = Pic[i]; // Member of index 0 2698 // G = Pic[i+1]; // Member of index 1 2699 // B = Pic[i+2]; // Member of index 2 2700 // ... // do something to R, G, B 2701 // } 2702 // To: 2703 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2704 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2705 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2706 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2707 // 2708 // Or translate following interleaved store group (factor = 3): 2709 // for (i = 0; i < N; i+=3) { 2710 // ... 
//            do something to R, G, B
//      Pic[i]   = R;           // Member of index 0
//      Pic[i+1] = G;           // Member of index 1
//      Pic[i+2] = B;           // Member of index 2
//    }
// To:
//   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
//   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
//   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
//        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
//   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
void InnerLoopVectorizer::vectorizeInterleaveGroup(
    const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
    VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
    VPValue *BlockInMask) {
  Instruction *Instr = Group->getInsertPos();
  const DataLayout &DL = Instr->getModule()->getDataLayout();

  // Prepare for the vector type of the interleaved load/store.
  Type *ScalarTy = getLoadStoreType(Instr);
  unsigned InterleaveFactor = Group->getFactor();
  assert(!VF.isScalable() && "scalable vectors not yet supported.");
  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);

  // Prepare for the new pointers.
  SmallVector<Value *, 2> AddrParts;
  unsigned Index = Group->getIndex(Instr);

  // TODO: extend the masked interleaved-group support to reversed access.
  assert((!BlockInMask || !Group->isReverse()) &&
         "Reversed masked interleave-group not supported.");

  // If the group is reverse, adjust the index to refer to the last vector lane
  // instead of the first. We adjust the index from the first vector lane,
  // rather than directly getting the pointer for lane VF - 1, because the
  // pointer operand of the interleaved access is supposed to be uniform. For
  // uniform instructions, we're only required to generate a value for the
  // first vector lane in each unroll iteration.
  if (Group->isReverse())
    Index += (VF.getKnownMinValue() - 1) * Group->getFactor();

  for (unsigned Part = 0; Part < UF; Part++) {
    Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
    setDebugLocFromInst(AddrPart);

    // Note that the current instruction could be at any member index; we need
    // to adjust the address down to the member of index 0.
    //
    // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
    //       b = A[i];       // Member of index 0
    // The current pointer points to A[i+1]; adjust it to A[i].
    //
    // E.g.  A[i+1] = a;     // Member of index 1
    //       A[i]   = b;     // Member of index 0
    //       A[i+2] = c;     // Member of index 2 (Current instruction)
    // The current pointer points to A[i+2]; adjust it to A[i].

    bool InBounds = false;
    if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
      InBounds = gep->isInBounds();
    AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
    cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);

    // Cast to the vector pointer type.
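    // For example (a sketch, fixed VF = 4, factor = 3, i32 elements): the
    // member-0 address computed above is bitcast from i32* to <12 x i32>*,
    // so each wide access below covers all three members of four iterations.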
2774 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2775 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2776 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2777 } 2778 2779 setDebugLocFromInst(Instr); 2780 Value *PoisonVec = PoisonValue::get(VecTy); 2781 2782 Value *MaskForGaps = nullptr; 2783 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2784 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2785 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2786 } 2787 2788 // Vectorize the interleaved load group. 2789 if (isa<LoadInst>(Instr)) { 2790 // For each unroll part, create a wide load for the group. 2791 SmallVector<Value *, 2> NewLoads; 2792 for (unsigned Part = 0; Part < UF; Part++) { 2793 Instruction *NewLoad; 2794 if (BlockInMask || MaskForGaps) { 2795 assert(useMaskedInterleavedAccesses(*TTI) && 2796 "masked interleaved groups are not allowed."); 2797 Value *GroupMask = MaskForGaps; 2798 if (BlockInMask) { 2799 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2800 Value *ShuffledMask = Builder.CreateShuffleVector( 2801 BlockInMaskPart, 2802 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2803 "interleaved.mask"); 2804 GroupMask = MaskForGaps 2805 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2806 MaskForGaps) 2807 : ShuffledMask; 2808 } 2809 NewLoad = 2810 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2811 GroupMask, PoisonVec, "wide.masked.vec"); 2812 } 2813 else 2814 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2815 Group->getAlign(), "wide.vec"); 2816 Group->addMetadata(NewLoad); 2817 NewLoads.push_back(NewLoad); 2818 } 2819 2820 // For each member in the group, shuffle out the appropriate data from the 2821 // wide loads. 2822 unsigned J = 0; 2823 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2824 Instruction *Member = Group->getMember(I); 2825 2826 // Skip the gaps in the group. 2827 if (!Member) 2828 continue; 2829 2830 auto StrideMask = 2831 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2832 for (unsigned Part = 0; Part < UF; Part++) { 2833 Value *StridedVec = Builder.CreateShuffleVector( 2834 NewLoads[Part], StrideMask, "strided.vec"); 2835 2836 // If this member has different type, cast the result type. 2837 if (Member->getType() != ScalarTy) { 2838 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2839 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2840 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2841 } 2842 2843 if (Group->isReverse()) 2844 StridedVec = reverseVector(StridedVec); 2845 2846 State.set(VPDefs[J], StridedVec, Part); 2847 } 2848 ++J; 2849 } 2850 return; 2851 } 2852 2853 // The sub vector type for current instruction. 2854 auto *SubVT = VectorType::get(ScalarTy, VF); 2855 2856 // Vectorize the interleaved store group. 2857 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2858 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2859 "masked interleaved groups are not allowed."); 2860 assert((!MaskForGaps || !VF.isScalable()) && 2861 "masking gaps for scalable vectors is not yet supported."); 2862 for (unsigned Part = 0; Part < UF; Part++) { 2863 // Collect the stored vector from each member. 
2864 SmallVector<Value *, 4> StoredVecs; 2865 for (unsigned i = 0; i < InterleaveFactor; i++) { 2866 assert((Group->getMember(i) || MaskForGaps) && 2867 "Fail to get a member from an interleaved store group"); 2868 Instruction *Member = Group->getMember(i); 2869 2870 // Skip the gaps in the group. 2871 if (!Member) { 2872 Value *Undef = PoisonValue::get(SubVT); 2873 StoredVecs.push_back(Undef); 2874 continue; 2875 } 2876 2877 Value *StoredVec = State.get(StoredValues[i], Part); 2878 2879 if (Group->isReverse()) 2880 StoredVec = reverseVector(StoredVec); 2881 2882 // If this member has different type, cast it to a unified type. 2883 2884 if (StoredVec->getType() != SubVT) 2885 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2886 2887 StoredVecs.push_back(StoredVec); 2888 } 2889 2890 // Concatenate all vectors into a wide vector. 2891 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2892 2893 // Interleave the elements in the wide vector. 2894 Value *IVec = Builder.CreateShuffleVector( 2895 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2896 "interleaved.vec"); 2897 2898 Instruction *NewStoreInstr; 2899 if (BlockInMask || MaskForGaps) { 2900 Value *GroupMask = MaskForGaps; 2901 if (BlockInMask) { 2902 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2903 Value *ShuffledMask = Builder.CreateShuffleVector( 2904 BlockInMaskPart, 2905 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2906 "interleaved.mask"); 2907 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2908 ShuffledMask, MaskForGaps) 2909 : ShuffledMask; 2910 } 2911 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2912 Group->getAlign(), GroupMask); 2913 } else 2914 NewStoreInstr = 2915 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2916 2917 Group->addMetadata(NewStoreInstr); 2918 } 2919 } 2920 2921 void InnerLoopVectorizer::vectorizeMemoryInstruction( 2922 Instruction *Instr, VPTransformState &State, VPValue *Def, VPValue *Addr, 2923 VPValue *StoredValue, VPValue *BlockInMask, bool ConsecutiveStride, 2924 bool Reverse) { 2925 // Attempt to issue a wide load. 2926 LoadInst *LI = dyn_cast<LoadInst>(Instr); 2927 StoreInst *SI = dyn_cast<StoreInst>(Instr); 2928 2929 assert((LI || SI) && "Invalid Load/Store instruction"); 2930 assert((!SI || StoredValue) && "No stored value provided for widened store"); 2931 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 2932 2933 Type *ScalarDataTy = getLoadStoreType(Instr); 2934 2935 auto *DataTy = VectorType::get(ScalarDataTy, VF); 2936 const Align Alignment = getLoadStoreAlignment(Instr); 2937 bool CreateGatherScatter = !ConsecutiveStride; 2938 2939 VectorParts BlockInMaskParts(UF); 2940 bool isMaskRequired = BlockInMask; 2941 if (isMaskRequired) 2942 for (unsigned Part = 0; Part < UF; ++Part) 2943 BlockInMaskParts[Part] = State.get(BlockInMask, Part); 2944 2945 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 2946 // Calculate the pointer for the specific unroll-part. 2947 GetElementPtrInst *PartPtr = nullptr; 2948 2949 bool InBounds = false; 2950 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 2951 InBounds = gep->isInBounds(); 2952 if (Reverse) { 2953 // If the address is consecutive but reversed, then the 2954 // wide store needs to start at the last vector element. 
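    // For example (a sketch, fixed VF = 4, so RunTimeVF = 4, and Part = 1):
    // NumElt = -1 * 4 = -4 and LastLane = 1 - 4 = -3, so the two GEPs below
    // move the pointer to Ptr - 7, the lowest address accessed by the second
    // reversed vector.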
2955 // RunTimeVF = VScale * VF.getKnownMinValue() 2956 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 2957 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), VF); 2958 // NumElt = -Part * RunTimeVF 2959 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 2960 // LastLane = 1 - RunTimeVF 2961 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 2962 PartPtr = 2963 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 2964 PartPtr->setIsInBounds(InBounds); 2965 PartPtr = cast<GetElementPtrInst>( 2966 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 2967 PartPtr->setIsInBounds(InBounds); 2968 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 2969 BlockInMaskParts[Part] = reverseVector(BlockInMaskParts[Part]); 2970 } else { 2971 Value *Increment = 2972 createStepForVF(Builder, Builder.getInt32Ty(), VF, Part); 2973 PartPtr = cast<GetElementPtrInst>( 2974 Builder.CreateGEP(ScalarDataTy, Ptr, Increment)); 2975 PartPtr->setIsInBounds(InBounds); 2976 } 2977 2978 unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace(); 2979 return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace)); 2980 }; 2981 2982 // Handle Stores: 2983 if (SI) { 2984 setDebugLocFromInst(SI); 2985 2986 for (unsigned Part = 0; Part < UF; ++Part) { 2987 Instruction *NewSI = nullptr; 2988 Value *StoredVal = State.get(StoredValue, Part); 2989 if (CreateGatherScatter) { 2990 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 2991 Value *VectorGep = State.get(Addr, Part); 2992 NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, 2993 MaskPart); 2994 } else { 2995 if (Reverse) { 2996 // If we store to reverse consecutive memory locations, then we need 2997 // to reverse the order of elements in the stored value. 2998 StoredVal = reverseVector(StoredVal); 2999 // We don't want to update the value in the map as it might be used in 3000 // another expression. So don't call resetVectorValue(StoredVal). 3001 } 3002 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3003 if (isMaskRequired) 3004 NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment, 3005 BlockInMaskParts[Part]); 3006 else 3007 NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment); 3008 } 3009 addMetadata(NewSI, SI); 3010 } 3011 return; 3012 } 3013 3014 // Handle loads. 3015 assert(LI && "Must have a load instruction"); 3016 setDebugLocFromInst(LI); 3017 for (unsigned Part = 0; Part < UF; ++Part) { 3018 Value *NewLI; 3019 if (CreateGatherScatter) { 3020 Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; 3021 Value *VectorGep = State.get(Addr, Part); 3022 NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, 3023 nullptr, "wide.masked.gather"); 3024 addMetadata(NewLI, LI); 3025 } else { 3026 auto *VecPtr = CreateVecPtr(Part, State.get(Addr, VPIteration(0, 0))); 3027 if (isMaskRequired) 3028 NewLI = Builder.CreateMaskedLoad( 3029 DataTy, VecPtr, Alignment, BlockInMaskParts[Part], 3030 PoisonValue::get(DataTy), "wide.masked.load"); 3031 else 3032 NewLI = 3033 Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load"); 3034 3035 // Add metadata to the load, but setVectorValue to the reverse shuffle. 
      addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = reverseVector(NewLI);
    }

    State.set(Def, NewLI, Part);
  }
}

void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, VPValue *Def,
                                               VPUser &User,
                                               const VPIteration &Instance,
                                               bool IfPredicateInstr,
                                               VPTransformState &State) {
  assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");

  // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
  // the first lane and part.
  if (isa<NoAliasScopeDeclInst>(Instr))
    if (!Instance.isFirstIteration())
      return;

  setDebugLocFromInst(Instr);

  // Does this instruction return a value?
  bool IsVoidRetTy = Instr->getType()->isVoidTy();

  Instruction *Cloned = Instr->clone();
  if (!IsVoidRetTy)
    Cloned->setName(Instr->getName() + ".cloned");

  State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
                               Builder.GetInsertPoint());
  // Replace the operands of the cloned instruction with their scalar
  // equivalents in the new loop.
  for (unsigned op = 0, e = User.getNumOperands(); op != e; ++op) {
    auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
    auto InputInstance = Instance;
    if (!Operand || !OrigLoop->contains(Operand) ||
        (Cost->isUniformAfterVectorization(Operand, State.VF)))
      InputInstance.Lane = VPLane::getFirstLane();
    auto *NewOp = State.get(User.getOperand(op), InputInstance);
    Cloned->setOperand(op, NewOp);
  }
  addNewMetadata(Cloned, Instr);

  // Place the cloned scalar in the new loop.
  Builder.Insert(Cloned);

  State.set(Def, Cloned, Instance);

  // If we just cloned a new assumption, add it to the assumption cache.
  if (auto *II = dyn_cast<AssumeInst>(Cloned))
    AC->registerAssumption(II);

  // End if-block.
  if (IfPredicateInstr)
    PredicatedInstructions.push_back(Cloned);
}

PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
                                                      Value *End, Value *Step,
                                                      Instruction *DL) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Latch = L->getLoopLatch();
  // As we're just creating this loop, it's possible no latch exists
  // yet. If so, use the header as this will be a single block loop.
  if (!Latch)
    Latch = Header;

  IRBuilder<> B(&*Header->getFirstInsertionPt());
  Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
  setDebugLocFromInst(OldInst, &B);
  auto *Induction = B.CreatePHI(Start->getType(), 2, "index");

  B.SetInsertPoint(Latch->getTerminator());
  setDebugLocFromInst(OldInst, &B);

  // Create i+1 and fill the PHINode.
  //
  // If the tail is not folded, we know that End - Start >= Step (either
  // statically or through the minimum iteration checks). We also know that both
  // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
  // %Step == %End. Hence we must exit the loop before %IV + %Step unsigned
  // overflows, and we can mark the induction increment as NUW.
  Value *Next = B.CreateAdd(Induction, Step, "index.next",
                            /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
  Induction->addIncoming(Start, L->getLoopPreheader());
  Induction->addIncoming(Next, Latch);
  // Create the compare.
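  // Together with the phi and increment created above, the compare and branch
  // below complete a loop skeleton of roughly this shape (a sketch):
  //   %index      = phi [ %start, %preheader ], [ %index.next, %latch ]
  //   ...
  //   %index.next = add nuw %index, %step    ; nuw unless tail-folding
  //   %cmp        = icmp eq %index.next, %end
  //   br %cmp, <loop exit>, <header>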
  Value *ICmp = B.CreateICmpEQ(Next, End);
  B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);

  // Now we have two terminators. Remove the old one from the block.
  Latch->getTerminator()->eraseFromParent();

  return Induction;
}

Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
  if (TripCount)
    return TripCount;

  assert(L && "Create Trip Count for null loop.");
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  // Find the loop boundaries.
  ScalarEvolution *SE = PSE.getSE();
  const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
  assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
         "Invalid loop count");

  Type *IdxTy = Legal->getWidestInductionType();
  assert(IdxTy && "No type for induction");

  // The exit count might be of type i64 while the phi is i32. This can happen
  // if we have an induction variable that is sign-extended before the compare.
  // The only way we get a backedge-taken count in that case is if the
  // induction variable was signed, and as such it will not overflow, so the
  // truncation is legal.
  if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
      IdxTy->getPrimitiveSizeInBits())
    BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
  BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);

  // Get the total trip count from the backedge-taken count by adding 1.
  const SCEV *ExitCount = SE->getAddExpr(
      BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Expand the trip count and place the new instructions in the preheader.
  // Notice that the pre-header does not change, only the loop body.
  SCEVExpander Exp(*SE, DL, "induction");

  // Count holds the overall loop count (N).
  TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
                                L->getLoopPreheader()->getTerminator());

  if (TripCount->getType()->isPointerTy())
    TripCount =
        CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
                                    L->getLoopPreheader()->getTerminator());

  return TripCount;
}

Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
  if (VectorTripCount)
    return VectorTripCount;

  Value *TC = getOrCreateTripCount(L);
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());

  Type *Ty = TC->getType();
  // This is where we can make the step a runtime constant.
  Value *Step = createStepForVF(Builder, Ty, VF, UF);

  // If the tail is to be folded by masking, round the number of iterations N
  // up to a multiple of Step instead of rounding down. This is done by first
  // adding Step-1 and then rounding down. Note that it's OK if this addition
  // overflows: the vector induction variable will eventually wrap to zero
  // given that it starts at zero and its Step is a power of two; the loop
  // will then exit, with the last early-exit vector comparison also producing
  // all-true.
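  // For example (assuming N = 10 and VF * UF = 4): folding the tail rounds
  // the count up to 13, so n.mod.vf = 13 % 4 = 1 and n.vec = 12, and the
  // masked vector loop runs 12 / 4 = 3 iterations covering all 10 elements.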
3199 if (Cost->foldTailByMasking()) { 3200 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 3201 "VF*UF must be a power of 2 when folding tail by masking"); 3202 assert(!VF.isScalable() && 3203 "Tail folding not yet supported for scalable vectors"); 3204 TC = Builder.CreateAdd( 3205 TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up"); 3206 } 3207 3208 // Now we need to generate the expression for the part of the loop that the 3209 // vectorized body will execute. This is equal to N - (N % Step) if scalar 3210 // iterations are not required for correctness, or N - Step, otherwise. Step 3211 // is equal to the vectorization factor (number of SIMD elements) times the 3212 // unroll factor (number of SIMD instructions). 3213 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 3214 3215 // There are cases where we *must* run at least one iteration in the remainder 3216 // loop. See the cost model for when this can happen. If the step evenly 3217 // divides the trip count, we set the remainder to be equal to the step. If 3218 // the step does not evenly divide the trip count, no adjustment is necessary 3219 // since there will already be scalar iterations. Note that the minimum 3220 // iterations check ensures that N >= Step. 3221 if (Cost->requiresScalarEpilogue(VF)) { 3222 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 3223 R = Builder.CreateSelect(IsZero, Step, R); 3224 } 3225 3226 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 3227 3228 return VectorTripCount; 3229 } 3230 3231 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 3232 const DataLayout &DL) { 3233 // Verify that V is a vector type with same number of elements as DstVTy. 3234 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 3235 unsigned VF = DstFVTy->getNumElements(); 3236 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 3237 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 3238 Type *SrcElemTy = SrcVecTy->getElementType(); 3239 Type *DstElemTy = DstFVTy->getElementType(); 3240 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 3241 "Vector elements must have same size"); 3242 3243 // Do a direct cast if element types are castable. 3244 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 3245 return Builder.CreateBitOrPointerCast(V, DstFVTy); 3246 } 3247 // V cannot be directly casted to desired vector type. 3248 // May happen when V is a floating point vector but DstVTy is a vector of 3249 // pointers or vice-versa. Handle this using a two-step bitcast using an 3250 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 3251 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 3252 "Only one type should be a pointer type"); 3253 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 3254 "Only one type should be a floating point type"); 3255 Type *IntTy = 3256 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 3257 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 3258 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 3259 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 3260 } 3261 3262 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L, 3263 BasicBlock *Bypass) { 3264 Value *Count = getOrCreateTripCount(L); 3265 // Reuse existing vector loop preheader for TC checks. 3266 // Note that new preheader block is generated for vector loop. 
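  // For example (a sketch, fixed VF = 4, UF = 2, no tail folding, an i64 trip
  // count), the check emitted below is
  //   %min.iters.check = icmp ult i64 %count, 8
  // (ule instead of ult when a scalar epilogue is required), and we branch to
  // the scalar loop when it is true.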
3267 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 3268 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 3269 3270 // Generate code to check if the loop's trip count is less than VF * UF, or 3271 // equal to it in case a scalar epilogue is required; this implies that the 3272 // vector trip count is zero. This check also covers the case where adding one 3273 // to the backedge-taken count overflowed leading to an incorrect trip count 3274 // of zero. In this case we will also jump to the scalar loop. 3275 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 3276 : ICmpInst::ICMP_ULT; 3277 3278 // If tail is to be folded, vector loop takes care of all iterations. 3279 Value *CheckMinIters = Builder.getFalse(); 3280 if (!Cost->foldTailByMasking()) { 3281 Value *Step = createStepForVF(Builder, Count->getType(), VF, UF); 3282 CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check"); 3283 } 3284 // Create new preheader for vector loop. 3285 LoopVectorPreHeader = 3286 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 3287 "vector.ph"); 3288 3289 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 3290 DT->getNode(Bypass)->getIDom()) && 3291 "TC check is expected to dominate Bypass"); 3292 3293 // Update dominator for Bypass & LoopExit (if needed). 3294 DT->changeImmediateDominator(Bypass, TCCheckBlock); 3295 if (!Cost->requiresScalarEpilogue(VF)) 3296 // If there is an epilogue which must run, there's no edge from the 3297 // middle block to exit blocks and thus no need to update the immediate 3298 // dominator of the exit blocks. 3299 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 3300 3301 ReplaceInstWithInst( 3302 TCCheckBlock->getTerminator(), 3303 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 3304 LoopBypassBlocks.push_back(TCCheckBlock); 3305 } 3306 3307 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) { 3308 3309 BasicBlock *const SCEVCheckBlock = 3310 RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock); 3311 if (!SCEVCheckBlock) 3312 return nullptr; 3313 3314 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3315 (OptForSizeBasedOnProfile && 3316 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3317 "Cannot SCEV check stride or overflow when optimizing for size"); 3318 3319 3320 // Update dominator only if this is first RT check. 3321 if (LoopBypassBlocks.empty()) { 3322 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3323 if (!Cost->requiresScalarEpilogue(VF)) 3324 // If there is an epilogue which must run, there's no edge from the 3325 // middle block to exit blocks and thus no need to update the immediate 3326 // dominator of the exit blocks. 3327 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3328 } 3329 3330 LoopBypassBlocks.push_back(SCEVCheckBlock); 3331 AddedSafetyChecks = true; 3332 return SCEVCheckBlock; 3333 } 3334 3335 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L, 3336 BasicBlock *Bypass) { 3337 // VPlan-native path does not do any analysis for runtime checks currently. 3338 if (EnableVPlanNativePath) 3339 return nullptr; 3340 3341 BasicBlock *const MemCheckBlock = 3342 RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader); 3343 3344 // Check if we generated code that checks in runtime if arrays overlap. We put 3345 // the checks into a separate block to make the more common case of few 3346 // elements faster. 
  if (!MemCheckBlock)
    return nullptr;

  if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
    assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
           "Cannot emit memory checks when optimizing for size, unless forced "
           "to vectorize.");
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
                                        L->getStartLoc(), L->getHeader())
             << "Code-size may be reduced by not forcing "
                "vectorization, or by source-code modifications "
                "eliminating the need for runtime checks "
                "(e.g., adding 'restrict').";
    });
  }

  LoopBypassBlocks.push_back(MemCheckBlock);

  AddedSafetyChecks = true;

  // We currently don't use LoopVersioning for the actual loop cloning but we
  // still use it to add the noalias metadata.
  LVer = std::make_unique<LoopVersioning>(
      *Legal->getLAI(),
      Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
      DT, PSE.getSE());
  LVer->prepareNoAliasMetadata();
  return MemCheckBlock;
}

Value *InnerLoopVectorizer::emitTransformedIndex(
    IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
    const InductionDescriptor &ID) const {
  SCEVExpander Exp(*SE, DL, "induction");
  auto Step = ID.getStep();
  auto StartValue = ID.getStartValue();
  assert(Index->getType()->getScalarType() == Step->getType() &&
         "Index scalar type does not match StepValue type");

  // Note: the IR at this point is broken. We cannot use SE to create any new
  // SCEV and then expand it, hoping that SCEV's simplification will give us
  // more optimal code. Unfortunately, attempting to do so on invalid IR may
  // lead to various SCEV crashes. So all we can do is use the builder and rely
  // on InstCombine for future simplifications. Here we handle some trivial
  // cases only.
  auto CreateAdd = [&B](Value *X, Value *Y) {
    assert(X->getType() == Y->getType() && "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isZero())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isZero())
        return X;
    return B.CreateAdd(X, Y);
  };

  // We allow X to be a vector type, in which case Y will potentially be
  // splatted into a vector with the same element count.
  auto CreateMul = [&B](Value *X, Value *Y) {
    assert(X->getType()->getScalarType() == Y->getType() &&
           "Types don't match!");
    if (auto *CX = dyn_cast<ConstantInt>(X))
      if (CX->isOne())
        return Y;
    if (auto *CY = dyn_cast<ConstantInt>(Y))
      if (CY->isOne())
        return X;
    VectorType *XVTy = dyn_cast<VectorType>(X->getType());
    if (XVTy && !isa<VectorType>(Y->getType()))
      Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
    return B.CreateMul(X, Y);
  };

  // Get a suitable insert point for SCEV expansion. For blocks in the vector
  // loop, choose the end of the vector loop header (=LoopVectorBody), because
  // the DomTree is not kept up-to-date for additional blocks generated in the
  // vector loop. By using the header as insertion point, we guarantee that the
  // expanded instructions dominate all their uses.
  auto GetInsertPoint = [this, &B]() {
    BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
    if (InsertBB != LoopVectorBody &&
        LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
      return LoopVectorBody->getTerminator();
    return &*B.GetInsertPoint();
  };

  switch (ID.getKind()) {
  case InductionDescriptor::IK_IntInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for integer inductions yet");
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");
    if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    auto *Offset = CreateMul(
        Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
    return CreateAdd(StartValue, Offset);
  }
  case InductionDescriptor::IK_PtrInduction: {
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    return B.CreateGEP(
        ID.getElementType(), StartValue,
        CreateMul(Index,
                  Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
                                    GetInsertPoint())));
  }
  case InductionDescriptor::IK_FpInduction: {
    assert(!isa<VectorType>(Index->getType()) &&
           "Vector indices not supported for FP inductions yet");
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    auto InductionBinOp = ID.getInductionBinOp();
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
    Value *MulExp = B.CreateFMul(StepValue, Index);
    return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
                         "induction");
  }
  case InductionDescriptor::IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}

Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
  LoopScalarBody = OrigLoop->getHeader();
  LoopVectorPreHeader = OrigLoop->getLoopPreheader();
  assert(LoopVectorPreHeader && "Invalid loop structure");
  LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
  assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
         "multiple exit loop without required epilogue?");

  LoopMiddleBlock =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, Twine(Prefix) + "middle.block");
  LoopScalarPreHeader =
      SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
                 nullptr, Twine(Prefix) + "scalar.ph");

  auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();

  // Set up the middle block terminator. Two cases:
  // 1) If we know that we must execute the scalar epilogue, emit an
  //    unconditional branch.
  // 2) Otherwise, we must have a single unique exit block (due to how we
  //    implement the multiple exit case). In this case, set up a conditional
  //    branch from the middle block to the loop scalar preheader, and the
  //    exit block. completeLoopSkeleton will update the condition to use an
  //    iteration check, if required to decide whether to execute the remainder.
  BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3503 BranchInst::Create(LoopScalarPreHeader) : 3504 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3505 Builder.getTrue()); 3506 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3507 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3508 3509 // We intentionally don't let SplitBlock to update LoopInfo since 3510 // LoopVectorBody should belong to another loop than LoopVectorPreHeader. 3511 // LoopVectorBody is explicitly added to the correct place few lines later. 3512 LoopVectorBody = 3513 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3514 nullptr, nullptr, Twine(Prefix) + "vector.body"); 3515 3516 // Update dominator for loop exit. 3517 if (!Cost->requiresScalarEpilogue(VF)) 3518 // If there is an epilogue which must run, there's no edge from the 3519 // middle block to exit blocks and thus no need to update the immediate 3520 // dominator of the exit blocks. 3521 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3522 3523 // Create and register the new vector loop. 3524 Loop *Lp = LI->AllocateLoop(); 3525 Loop *ParentLoop = OrigLoop->getParentLoop(); 3526 3527 // Insert the new loop into the loop nest and register the new basic blocks 3528 // before calling any utilities such as SCEV that require valid LoopInfo. 3529 if (ParentLoop) { 3530 ParentLoop->addChildLoop(Lp); 3531 } else { 3532 LI->addTopLevelLoop(Lp); 3533 } 3534 Lp->addBasicBlockToLoop(LoopVectorBody, *LI); 3535 return Lp; 3536 } 3537 3538 void InnerLoopVectorizer::createInductionResumeValues( 3539 Loop *L, Value *VectorTripCount, 3540 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3541 assert(VectorTripCount && L && "Expected valid arguments"); 3542 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3543 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3544 "Inconsistent information about additional bypass."); 3545 // We are going to resume the execution of the scalar loop. 3546 // Go over all of the induction variables that we found and fix the 3547 // PHIs that are left in the scalar version of the loop. 3548 // The starting values of PHI nodes depend on the counter of the last 3549 // iteration in the vectorized loop. 3550 // If we come from a bypass edge then we need to start from the original 3551 // start value. 3552 for (auto &InductionEntry : Legal->getInductionVars()) { 3553 PHINode *OrigPhi = InductionEntry.first; 3554 InductionDescriptor II = InductionEntry.second; 3555 3556 // Create phi nodes to merge from the backedge-taken check block. 3557 PHINode *BCResumeVal = 3558 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3559 LoopScalarPreHeader->getTerminator()); 3560 // Copy original phi DL over to the new one. 3561 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3562 Value *&EndValue = IVEndValues[OrigPhi]; 3563 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3564 if (OrigPhi == OldInduction) { 3565 // We know what the end value is. 3566 EndValue = VectorTripCount; 3567 } else { 3568 IRBuilder<> B(L->getLoopPreheader()->getTerminator()); 3569 3570 // Fast-math-flags propagate from the original induction instruction. 
3571 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3572 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3573 3574 Type *StepType = II.getStep()->getType(); 3575 Instruction::CastOps CastOp = 3576 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3577 Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd"); 3578 const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout(); 3579 EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3580 EndValue->setName("ind.end"); 3581 3582 // Compute the end value for the additional bypass (if applicable). 3583 if (AdditionalBypass.first) { 3584 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3585 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3586 StepType, true); 3587 CRD = 3588 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd"); 3589 EndValueFromAdditionalBypass = 3590 emitTransformedIndex(B, CRD, PSE.getSE(), DL, II); 3591 EndValueFromAdditionalBypass->setName("ind.end"); 3592 } 3593 } 3594 // The new PHI merges the original incoming value, in case of a bypass, 3595 // or the value at the end of the vectorized loop. 3596 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3597 3598 // Fix the scalar body counter (PHI node). 3599 // The old induction's phi node in the scalar body needs the truncated 3600 // value. 3601 for (BasicBlock *BB : LoopBypassBlocks) 3602 BCResumeVal->addIncoming(II.getStartValue(), BB); 3603 3604 if (AdditionalBypass.first) 3605 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3606 EndValueFromAdditionalBypass); 3607 3608 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3609 } 3610 } 3611 3612 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L, 3613 MDNode *OrigLoopID) { 3614 assert(L && "Expected valid loop."); 3615 3616 // The trip counts should be cached by now. 3617 Value *Count = getOrCreateTripCount(L); 3618 Value *VectorTripCount = getOrCreateVectorTripCount(L); 3619 3620 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3621 3622 // Add a check in the middle block to see if we have completed 3623 // all of the iterations in the first vector loop. Three cases: 3624 // 1) If we require a scalar epilogue, there is no conditional branch as 3625 // we unconditionally branch to the scalar preheader. Do nothing. 3626 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3627 // Thus if tail is to be folded, we know we don't need to run the 3628 // remainder and we can use the previous value for the condition (true). 3629 // 3) Otherwise, construct a runtime check. 3630 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3631 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3632 Count, VectorTripCount, "cmp.n", 3633 LoopMiddleBlock->getTerminator()); 3634 3635 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3636 // of the corresponding compare because they may have ended up with 3637 // different line numbers and we want to avoid awkward line stepping while 3638 // debugging. Eg. if the compare has got a line number inside the loop. 3639 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3640 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3641 } 3642 3643 // Get ready to start creating new instructions into the vectorized body. 
3644 assert(LoopVectorPreHeader == L->getLoopPreheader() && 3645 "Inconsistent vector loop preheader"); 3646 Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt()); 3647 3648 Optional<MDNode *> VectorizedLoopID = 3649 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 3650 LLVMLoopVectorizeFollowupVectorized}); 3651 if (VectorizedLoopID.hasValue()) { 3652 L->setLoopID(VectorizedLoopID.getValue()); 3653 3654 // Do not setAlreadyVectorized if loop attributes have been defined 3655 // explicitly. 3656 return LoopVectorPreHeader; 3657 } 3658 3659 // Keep all loop hints from the original loop on the vector loop (we'll 3660 // replace the vectorizer-specific hints below). 3661 if (MDNode *LID = OrigLoop->getLoopID()) 3662 L->setLoopID(LID); 3663 3664 LoopVectorizeHints Hints(L, true, *ORE); 3665 Hints.setAlreadyVectorized(); 3666 3667 #ifdef EXPENSIVE_CHECKS 3668 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3669 LI->verify(*DT); 3670 #endif 3671 3672 return LoopVectorPreHeader; 3673 } 3674 3675 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3676 /* 3677 In this function we generate a new loop. The new loop will contain 3678 the vectorized instructions while the old loop will continue to run the 3679 scalar remainder. 3680 3681 [ ] <-- loop iteration number check. 3682 / | 3683 / v 3684 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3685 | / | 3686 | / v 3687 || [ ] <-- vector pre header. 3688 |/ | 3689 | v 3690 | [ ] \ 3691 | [ ]_| <-- vector loop. 3692 | | 3693 | v 3694 \ -[ ] <--- middle-block. 3695 \/ | 3696 /\ v 3697 | ->[ ] <--- new preheader. 3698 | | 3699 (opt) v <-- edge from middle to exit iff epilogue is not required. 3700 | [ ] \ 3701 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3702 \ | 3703 \ v 3704 >[ ] <-- exit block(s). 3705 ... 3706 */ 3707 3708 // Get the metadata of the original loop before it gets modified. 3709 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3710 3711 // Workaround! Compute the trip count of the original loop and cache it 3712 // before we start modifying the CFG. This code has a systemic problem 3713 // wherein it tries to run analysis over partially constructed IR; this is 3714 // wrong, and not simply for SCEV. The trip count of the original loop 3715 // simply happens to be prone to hitting this in practice. In theory, we 3716 // can hit the same issue for any SCEV, or ValueTracking query done during 3717 // mutation. See PR49900. 3718 getOrCreateTripCount(OrigLoop); 3719 3720 // Create an empty vector loop, and prepare basic blocks for the runtime 3721 // checks. 3722 Loop *Lp = createVectorLoopSkeleton(""); 3723 3724 // Now, compare the new count to zero. If it is zero skip the vector loop and 3725 // jump to the scalar loop. This check also covers the case where the 3726 // backedge-taken count is uint##_max: adding one to it will overflow leading 3727 // to an incorrect trip count of zero. In this (rare) case we will also jump 3728 // to the scalar loop. 3729 emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader); 3730 3731 // Generate the code to check any assumptions that we've made for SCEV 3732 // expressions. 3733 emitSCEVChecks(Lp, LoopScalarPreHeader); 3734 3735 // Generate the code that checks in runtime if arrays overlap. We put the 3736 // checks into a separate block to make the more common case of few elements 3737 // faster. 
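  // For example (a sketch, for a loop like 'a[i] = b[i] + 1' with statically
  // unknown aliasing), the emitted block conceptually checks
  //   %conflict = !(a + N <= b) && !(b + N <= a)
  // and branches to the scalar loop when %conflict is true.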
  emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Some loops have a single integer induction variable, while other loops
  // don't. One example is C++ iterators that often have multiple pointer
  // induction variables. In the code below we also support a case where we
  // don't have a single induction variable.
  //
  // We try as hard as possible to obtain an induction variable from the
  // original loop. However, if we don't find one that:
  //   - is an integer
  //   - counts from zero, stepping by one
  //   - is the size of the widest induction variable type
  // then we create a new one.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);
  // The loop step is equal to the vectorization factor (num of SIMD elements)
  // times the unroll factor (num of SIMD instructions).
  Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
  Value *Step = createStepForVF(Builder, IdxTy, VF, UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Emit phis for the new starting index of the scalar loop.
  createInductionResumeValues(Lp, CountRoundDown);

  return completeLoopSkeleton(Lp, OrigLoopID);
}

// Fix up external users of the induction variable. At this point, we are
// in LCSSA form, with all external PHIs that use the IV having one input value,
// coming from the remainder loop. We need those PHIs to also have a correct
// value for the IV when arriving directly from the middle block.
void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
                                       const InductionDescriptor &II,
                                       Value *CountRoundDown, Value *EndValue,
                                       BasicBlock *MiddleBlock) {
  // There are two kinds of external IV usages - those that use the value
  // computed in the last iteration (the PHI) and those that use the penultimate
  // value (the value that feeds into the phi from the loop latch).
  // We allow both, but they obviously have different values.

  assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");

  DenseMap<Value *, Value *> MissingVals;

  // An external user of the last iteration's value should see the value that
  // the remainder loop uses to initialize its own IV.
  Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
  for (User *U : PostInc->users()) {
    Instruction *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      assert(isa<PHINode>(UI) && "Expected LCSSA form");
      MissingVals[UI] = EndValue;
    }
  }

  // An external user of the penultimate value needs to see EndValue - Step.
  // The simplest way to get this is to recompute it from the constituent SCEVs,
  // that is Start + (Step * (CRD - 1)).
  for (User *U : OrigPhi->users()) {
    auto *UI = cast<Instruction>(U);
    if (!OrigLoop->contains(UI)) {
      const DataLayout &DL =
          OrigLoop->getHeader()->getModule()->getDataLayout();
      assert(isa<PHINode>(UI) && "Expected LCSSA form");

      IRBuilder<> B(MiddleBlock->getTerminator());

      // Fast-math-flags propagate from the original induction instruction.
3810 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3811 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3812 3813 Value *CountMinusOne = B.CreateSub( 3814 CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1)); 3815 Value *CMO = 3816 !II.getStep()->getType()->isIntegerTy() 3817 ? B.CreateCast(Instruction::SIToFP, CountMinusOne, 3818 II.getStep()->getType()) 3819 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3820 CMO->setName("cast.cmo"); 3821 Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II); 3822 Escape->setName("ind.escape"); 3823 MissingVals[UI] = Escape; 3824 } 3825 } 3826 3827 for (auto &I : MissingVals) { 3828 PHINode *PHI = cast<PHINode>(I.first); 3829 // One corner case we have to handle is two IVs "chasing" each-other, 3830 // that is %IV2 = phi [...], [ %IV1, %latch ] 3831 // In this case, if IV1 has an external use, we need to avoid adding both 3832 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3833 // don't already have an incoming value for the middle block. 3834 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) 3835 PHI->addIncoming(I.second, MiddleBlock); 3836 } 3837 } 3838 3839 namespace { 3840 3841 struct CSEDenseMapInfo { 3842 static bool canHandle(const Instruction *I) { 3843 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3844 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3845 } 3846 3847 static inline Instruction *getEmptyKey() { 3848 return DenseMapInfo<Instruction *>::getEmptyKey(); 3849 } 3850 3851 static inline Instruction *getTombstoneKey() { 3852 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3853 } 3854 3855 static unsigned getHashValue(const Instruction *I) { 3856 assert(canHandle(I) && "Unknown instruction!"); 3857 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3858 I->value_op_end())); 3859 } 3860 3861 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3862 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3863 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3864 return LHS == RHS; 3865 return LHS->isIdenticalTo(RHS); 3866 } 3867 }; 3868 3869 } // end anonymous namespace 3870 3871 ///Perform cse of induction variable instructions. 3872 static void cse(BasicBlock *BB) { 3873 // Perform simple cse. 3874 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3875 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3876 if (!CSEDenseMapInfo::canHandle(&In)) 3877 continue; 3878 3879 // Check if we can replace this instruction with any of the 3880 // visited instructions. 3881 if (Instruction *V = CSEMap.lookup(&In)) { 3882 In.replaceAllUsesWith(V); 3883 In.eraseFromParent(); 3884 continue; 3885 } 3886 3887 CSEMap[&In] = &In; 3888 } 3889 } 3890 3891 InstructionCost 3892 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3893 bool &NeedToScalarize) const { 3894 Function *F = CI->getCalledFunction(); 3895 Type *ScalarRetTy = CI->getType(); 3896 SmallVector<Type *, 4> Tys, ScalarTys; 3897 for (auto &ArgOp : CI->args()) 3898 ScalarTys.push_back(ArgOp->getType()); 3899 3900 // Estimate cost of scalarized vector call. The source operands are assumed 3901 // to be vectors, so we need to extract individual elements from there, 3902 // execute VF scalar calls, and then gather the result into the vector return 3903 // value. 
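// For example (assuming VF = 4): the scalarized cost computed below is
// roughly 4 * cost(scalar call) + the overhead of 4 extracts per vector
// argument and 4 inserts into the vector return value; it is then compared
// against the cost of a real vector variant, if the VFDatabase provides one.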
3904 InstructionCost ScalarCallCost = 3905 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3906 if (VF.isScalar()) 3907 return ScalarCallCost; 3908 3909 // Compute corresponding vector type for return value and arguments. 3910 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3911 for (Type *ScalarTy : ScalarTys) 3912 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3913 3914 // Compute costs of unpacking argument values for the scalar calls and 3915 // packing the return values to a vector. 3916 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3917 3918 InstructionCost Cost = 3919 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3920 3921 // If we can't emit a vector call for this function, then the currently found 3922 // cost is the cost we need to return. 3923 NeedToScalarize = true; 3924 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3925 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3926 3927 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3928 return Cost; 3929 3930 // If the corresponding vector cost is cheaper, return its cost. 3931 InstructionCost VectorCallCost = 3932 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3933 if (VectorCallCost < Cost) { 3934 NeedToScalarize = false; 3935 Cost = VectorCallCost; 3936 } 3937 return Cost; 3938 } 3939 3940 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3941 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3942 return Elt; 3943 return VectorType::get(Elt, VF); 3944 } 3945 3946 InstructionCost 3947 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3948 ElementCount VF) const { 3949 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3950 assert(ID && "Expected intrinsic call!"); 3951 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3952 FastMathFlags FMF; 3953 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3954 FMF = FPMO->getFastMathFlags(); 3955 3956 SmallVector<const Value *> Arguments(CI->args()); 3957 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3958 SmallVector<Type *> ParamTys; 3959 std::transform(FTy->param_begin(), FTy->param_end(), 3960 std::back_inserter(ParamTys), 3961 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3962 3963 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3964 dyn_cast<IntrinsicInst>(CI)); 3965 return TTI.getIntrinsicInstrCost(CostAttrs, 3966 TargetTransformInfo::TCK_RecipThroughput); 3967 } 3968 3969 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3970 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3971 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3972 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3973 } 3974 3975 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3976 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3977 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3978 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3979 } 3980 3981 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3982 // For every instruction `I` in MinBWs, truncate the operands, create a 3983 // truncated version of `I` and reextend its result. InstCombine runs 3984 // later and will remove any ext/trunc pairs. 
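  // For example (a sketch): if MinBWs records that an i32 add only needs 8
  // bits, a widened '%r = add <4 x i32> %a, %b' is rewritten as
  //   %a.tr = trunc <4 x i32> %a to <4 x i8>
  //   %b.tr = trunc <4 x i32> %b to <4 x i8>
  //   %r.tr = add <4 x i8> %a.tr, %b.tr
  //   %r    = zext <4 x i8> %r.tr to <4 x i32>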
3985 SmallPtrSet<Value *, 4> Erased; 3986 for (const auto &KV : Cost->getMinimalBitwidths()) { 3987 // If the value wasn't vectorized, we must maintain the original scalar 3988 // type. The absence of the value from State indicates that it 3989 // wasn't vectorized. 3990 // FIXME: Should not rely on getVPValue at this point. 3991 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3992 if (!State.hasAnyVectorValue(Def)) 3993 continue; 3994 for (unsigned Part = 0; Part < UF; ++Part) { 3995 Value *I = State.get(Def, Part); 3996 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3997 continue; 3998 Type *OriginalTy = I->getType(); 3999 Type *ScalarTruncatedTy = 4000 IntegerType::get(OriginalTy->getContext(), KV.second); 4001 auto *TruncatedTy = VectorType::get( 4002 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 4003 if (TruncatedTy == OriginalTy) 4004 continue; 4005 4006 IRBuilder<> B(cast<Instruction>(I)); 4007 auto ShrinkOperand = [&](Value *V) -> Value * { 4008 if (auto *ZI = dyn_cast<ZExtInst>(V)) 4009 if (ZI->getSrcTy() == TruncatedTy) 4010 return ZI->getOperand(0); 4011 return B.CreateZExtOrTrunc(V, TruncatedTy); 4012 }; 4013 4014 // The actual instruction modification depends on the instruction type, 4015 // unfortunately. 4016 Value *NewI = nullptr; 4017 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 4018 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 4019 ShrinkOperand(BO->getOperand(1))); 4020 4021 // Any wrapping introduced by shrinking this operation shouldn't be 4022 // considered undefined behavior. So, we can't unconditionally copy 4023 // arithmetic wrapping flags to NewI. 4024 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 4025 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 4026 NewI = 4027 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 4028 ShrinkOperand(CI->getOperand(1))); 4029 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 4030 NewI = B.CreateSelect(SI->getCondition(), 4031 ShrinkOperand(SI->getTrueValue()), 4032 ShrinkOperand(SI->getFalseValue())); 4033 } else if (auto *CI = dyn_cast<CastInst>(I)) { 4034 switch (CI->getOpcode()) { 4035 default: 4036 llvm_unreachable("Unhandled cast!"); 4037 case Instruction::Trunc: 4038 NewI = ShrinkOperand(CI->getOperand(0)); 4039 break; 4040 case Instruction::SExt: 4041 NewI = B.CreateSExtOrTrunc( 4042 CI->getOperand(0), 4043 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4044 break; 4045 case Instruction::ZExt: 4046 NewI = B.CreateZExtOrTrunc( 4047 CI->getOperand(0), 4048 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 4049 break; 4050 } 4051 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 4052 auto Elements0 = 4053 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 4054 auto *O0 = B.CreateZExtOrTrunc( 4055 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 4056 auto Elements1 = 4057 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 4058 auto *O1 = B.CreateZExtOrTrunc( 4059 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 4060 4061 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 4062 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 4063 // Don't do anything with the operands, just extend the result. 
4064 continue; 4065 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 4066 auto Elements = 4067 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 4068 auto *O0 = B.CreateZExtOrTrunc( 4069 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4070 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 4071 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 4072 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 4073 auto Elements = 4074 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 4075 auto *O0 = B.CreateZExtOrTrunc( 4076 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 4077 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 4078 } else { 4079 // If we don't know what to do, be conservative and don't do anything. 4080 continue; 4081 } 4082 4083 // Lastly, extend the result. 4084 NewI->takeName(cast<Instruction>(I)); 4085 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 4086 I->replaceAllUsesWith(Res); 4087 cast<Instruction>(I)->eraseFromParent(); 4088 Erased.insert(I); 4089 State.reset(Def, Res, Part); 4090 } 4091 } 4092 4093 // We'll have created a bunch of ZExts that are now parentless. Clean up. 4094 for (const auto &KV : Cost->getMinimalBitwidths()) { 4095 // If the value wasn't vectorized, we must maintain the original scalar 4096 // type. The absence of the value from State indicates that it 4097 // wasn't vectorized. 4098 // FIXME: Should not rely on getVPValue at this point. 4099 VPValue *Def = State.Plan->getVPValue(KV.first, true); 4100 if (!State.hasAnyVectorValue(Def)) 4101 continue; 4102 for (unsigned Part = 0; Part < UF; ++Part) { 4103 Value *I = State.get(Def, Part); 4104 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 4105 if (Inst && Inst->use_empty()) { 4106 Value *NewI = Inst->getOperand(0); 4107 Inst->eraseFromParent(); 4108 State.reset(Def, NewI, Part); 4109 } 4110 } 4111 } 4112 } 4113 4114 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) { 4115 // Insert truncates and extends for any truncated instructions as hints to 4116 // InstCombine. 4117 if (VF.isVector()) 4118 truncateToMinimalBitwidths(State); 4119 4120 // Fix widened non-induction PHIs by setting up the PHI operands. 4121 if (OrigPHIsToFix.size()) { 4122 assert(EnableVPlanNativePath && 4123 "Unexpected non-induction PHIs for fixup in non VPlan-native path"); 4124 fixNonInductionPHIs(State); 4125 } 4126 4127 // At this point every instruction in the original loop is widened to a 4128 // vector form. Now we need to fix the recurrences in the loop. These PHI 4129 // nodes are currently empty because we did not want to introduce cycles. 4130 // This is the second stage of vectorizing recurrences. 4131 fixCrossIterationPHIs(State); 4132 4133 // Forget the original basic block. 4134 PSE.getSE()->forgetLoop(OrigLoop); 4135 4136 // If we inserted an edge from the middle block to the unique exit block, 4137 // update uses outside the loop (phis) to account for the newly inserted 4138 // edge. 4139 if (!Cost->requiresScalarEpilogue(VF)) { 4140 // Fix-up external users of the induction variables. 4141 for (auto &Entry : Legal->getInductionVars()) 4142 fixupIVUsers(Entry.first, Entry.second, 4143 getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)), 4144 IVEndValues[Entry.first], LoopMiddleBlock); 4145 4146 fixLCSSAPHIs(State); 4147 } 4148 4149 for (Instruction *PI : PredicatedInstructions) 4150 sinkScalarOperands(&*PI); 4151 4152 // Remove redundant induction instructions. 
4153   cse(LoopVectorBody);
4154
4155   // Set/update profile weights for the vector and remainder loops as original
4156   // loop iterations are now distributed among them. Note that the original
4157   // loop, represented by LoopScalarBody, becomes the remainder loop after
4158   // vectorization.
4159   //
4160   // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4161   // end up with slightly less accurate results, but that should be OK, since
4162   // the profile is not inherently precise anyway. Note also that any possible
4163   // bypass of the vector code due to legality checks is ignored, optimistically
4164   // assigning all the weight to the vector loop.
4165   //
4166   // For scalable vectorization we can't know at compile time how many
4167   // iterations of the loop are handled in one vector iteration, so instead
4168   // assume a pessimistic vscale of '1'.
4168   setProfileInfoAfterUnrolling(
4169       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4170       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4171 }
4172
4173 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4174   // In order to support recurrences we need to be able to vectorize Phi nodes.
4175   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4176   // stage #2: We now need to fix the recurrences by adding incoming edges to
4177   // the currently empty PHI nodes. At this point every instruction in the
4178   // original loop is widened to a vector form so we can use them to construct
4179   // the incoming edges.
4180   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4181   for (VPRecipeBase &R : Header->phis()) {
4182     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4183       fixReduction(ReductionPhi, State);
4184     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4185       fixFirstOrderRecurrence(FOR, State);
4186   }
4187 }
4188
4189 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4190                                                   VPTransformState &State) {
4191   // This is the second phase of vectorizing first-order recurrences. An
4192   // overview of the transformation is described below. Suppose we have the
4193   // following loop.
4194   //
4195   //   for (int i = 0; i < n; ++i)
4196   //     b[i] = a[i] - a[i - 1];
4197   //
4198   // There is a first-order recurrence on "a". For this loop, the shorthand
4199   // scalar IR looks like:
4200   //
4201   //   scalar.ph:
4202   //     s_init = a[-1]
4203   //     br scalar.body
4204   //
4205   //   scalar.body:
4206   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4207   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4208   //     s2 = a[i]
4209   //     b[i] = s2 - s1
4210   //     br cond, scalar.body, ...
4211   //
4212   // In this example, s1 is a recurrence because its value depends on the
4213   // previous iteration. In the first phase of vectorization, we created a
4214   // vector phi v1 for s1. We now complete the vectorization and produce the
4215   // shorthand vector IR shown below (for VF = 4, UF = 1).
4216   //
4217   //   vector.ph:
4218   //     v_init = vector(..., ..., ..., a[-1])
4219   //     br vector.body
4220   //
4221   //   vector.body
4222   //     i = phi [0, vector.ph], [i+4, vector.body]
4223   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4224   //     v2 = a[i, i+1, i+2, i+3];
4225   //     v3 = vector(v1(3), v2(0, 1, 2))
4226   //     b[i, i+1, i+2, i+3] = v2 - v3
4227   //     br cond, vector.body, middle.block
4228   //
4229   //   middle.block:
4230   //     x = v2(3)
4231   //     br scalar.ph
4232   //
4233   //   scalar.ph:
4234   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4235   //     br scalar.body
4236   //
4237   // After the vector loop finishes executing, we extract the next value of
4238   // the recurrence (x) to use as the initial value in the scalar loop.
4239
4240   // Extract the last vector element in the middle block. This will be the
4241   // initial value for the recurrence when jumping to the scalar loop.
4242   VPValue *PreviousDef = PhiR->getBackedgeValue();
4243   Value *Incoming = State.get(PreviousDef, UF - 1);
4244   auto *ExtractForScalar = Incoming;
4245   auto *IdxTy = Builder.getInt32Ty();
4246   if (VF.isVector()) {
4247     auto *One = ConstantInt::get(IdxTy, 1);
4248     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4249     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4250     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4251     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4252                                                     "vector.recur.extract");
4253   }
4254   // Extract the second-to-last element in the middle block if the
4255   // Phi is used outside the loop. We need to extract the phi itself
4256   // and not the last element (the phi update in the current iteration). This
4257   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4258   // when the scalar loop is not run at all.
4259   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4260   if (VF.isVector()) {
4261     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4262     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4263     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4264         Incoming, Idx, "vector.recur.extract.for.phi");
4265   } else if (UF > 1)
4266     // When the loop is unrolled without being vectorized, initialize
4267     // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled
4268     // value of `Incoming`. This is analogous to the vectorized case above:
4269     // extracting the second-to-last element when VF > 1.
4270     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4271
4272   // Fix the initial value of the original recurrence in the scalar loop.
4273   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4274   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4275   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4276   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4277   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4278     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4279     Start->addIncoming(Incoming, BB);
4280   }
4281
4282   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4283   Phi->setName("scalar.recur");
4284
4285   // Finally, fix users of the recurrence outside the loop. The users will need
4286   // either the last value of the scalar recurrence or the last value of the
4287   // vector recurrence we extracted in the middle block. Since the loop is in
4288   // LCSSA form, we just need to find all the phi nodes for the original scalar
4289   // recurrence in the exit block, and then add an edge for the middle block.
4290   // Note that LCSSA does not imply single entry when the original scalar loop
4291   // had multiple exiting edges (as we always run the last iteration in the
4292   // scalar epilogue); in that case, there is no edge from middle to exit and
4293   // thus no phis that need to be updated.
4294   if (!Cost->requiresScalarEpilogue(VF))
4295     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4296       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4297         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4298 }
4299
4300 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4301                                        VPTransformState &State) {
4302   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4303   // Get its reduction variable descriptor.
4304   assert(Legal->isReductionVariable(OrigPhi) &&
4305          "Unable to find the reduction variable");
4306   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4307
4308   RecurKind RK = RdxDesc.getRecurrenceKind();
4309   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4310   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4311   setDebugLocFromInst(ReductionStartValue);
4312
4313   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4314   // This is the vector-clone of the value that leaves the loop.
4315   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4316
4317   // Wrap flags are in general invalid after vectorization, clear them.
4318   clearReductionWrapFlags(RdxDesc, State);
4319
4320   // Before each round, move the insertion point right between
4321   // the PHIs and the values we are going to write.
4322   // This allows us to write both PHINodes and the extractelement
4323   // instructions.
4324   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4325
4326   setDebugLocFromInst(LoopExitInst);
4327
4328   Type *PhiTy = OrigPhi->getType();
4329   // If tail is folded by masking, the vector value to leave the loop should be
4330   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4331   // instead of the former. For an inloop reduction the reduction will already
4332   // be predicated, and does not need to be handled here.
4333   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4334     for (unsigned Part = 0; Part < UF; ++Part) {
4335       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4336       Value *Sel = nullptr;
4337       for (User *U : VecLoopExitInst->users()) {
4338         if (isa<SelectInst>(U)) {
4339           assert(!Sel && "Reduction exit feeding two selects");
4340           Sel = U;
4341         } else
4342           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4343       }
4344       assert(Sel && "Reduction exit feeds no select");
4345       State.reset(LoopExitInstDef, Sel, Part);
4346
4347       // If the target can create a predicated operator for the reduction at no
4348       // extra cost in the loop (for example a predicated vadd), it can be
4349       // cheaper for the select to remain in the loop than be sunk out of it,
4350       // and so use the select value for the phi instead of the old
4351       // LoopExitValue.
4352 if (PreferPredicatedReductionSelect || 4353 TTI->preferPredicatedReductionSelect( 4354 RdxDesc.getOpcode(), PhiTy, 4355 TargetTransformInfo::ReductionFlags())) { 4356 auto *VecRdxPhi = 4357 cast<PHINode>(State.get(PhiR, Part)); 4358 VecRdxPhi->setIncomingValueForBlock( 4359 LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel); 4360 } 4361 } 4362 } 4363 4364 // If the vector reduction can be performed in a smaller type, we truncate 4365 // then extend the loop exit value to enable InstCombine to evaluate the 4366 // entire expression in the smaller type. 4367 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 4368 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 4369 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 4370 Builder.SetInsertPoint( 4371 LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator()); 4372 VectorParts RdxParts(UF); 4373 for (unsigned Part = 0; Part < UF; ++Part) { 4374 RdxParts[Part] = State.get(LoopExitInstDef, Part); 4375 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4376 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 4377 : Builder.CreateZExt(Trunc, VecTy); 4378 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 4379 if (U != Trunc) { 4380 U->replaceUsesOfWith(RdxParts[Part], Extnd); 4381 RdxParts[Part] = Extnd; 4382 } 4383 } 4384 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 4385 for (unsigned Part = 0; Part < UF; ++Part) { 4386 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 4387 State.reset(LoopExitInstDef, RdxParts[Part], Part); 4388 } 4389 } 4390 4391 // Reduce all of the unrolled parts into a single vector. 4392 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 4393 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 4394 4395 // The middle block terminator has already been assigned a DebugLoc here (the 4396 // OrigLoop's single latch terminator). We want the whole middle block to 4397 // appear to execute on this line because: (a) it is all compiler generated, 4398 // (b) these instructions are always executed after evaluating the latch 4399 // conditional branch, and (c) other passes may add new predecessors which 4400 // terminate on this line. This is the easiest way to ensure we don't 4401 // accidentally cause an extra step back into the loop while debugging. 4402 setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 4403 if (PhiR->isOrdered()) 4404 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 4405 else { 4406 // Floating-point operations should have some FMF to enable the reduction. 4407 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 4408 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 4409 for (unsigned Part = 1; Part < UF; ++Part) { 4410 Value *RdxPart = State.get(LoopExitInstDef, Part); 4411 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 4412 ReducedPartRdx = Builder.CreateBinOp( 4413 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 4414 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 4415 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 4416 ReducedPartRdx, RdxPart); 4417 else 4418 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 4419 } 4420 } 4421 4422 // Create the reduction after the loop. Note that inloop reductions create the 4423 // target reduction in the loop using a Reduction recipe. 
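  //
  // For example (shorthand, assuming an integer add reduction with VF = 4 and
  // UF = 2), the unrolled parts are first combined with a binary operator and
  // then reduced to a scalar with a single target reduction:
  //   %bin.rdx = add <4 x i32> %part.1, %part.0
  //   %rdx     = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)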
4424   if (VF.isVector() && !PhiR->isInLoop()) {
4425     ReducedPartRdx =
4426         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4427     // If the reduction can be performed in a smaller type, we need to extend
4428     // the reduction to the wider type before we branch to the original loop.
4429     if (PhiTy != RdxDesc.getRecurrenceType())
4430       ReducedPartRdx = RdxDesc.isSigned()
4431                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4432                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4433   }
4434
4435   // Create a phi node that merges control-flow from the backedge-taken check
4436   // block and the middle block.
4437   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4438                                         LoopScalarPreHeader->getTerminator());
4439   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4440     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4441   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4442
4443   // Now, we need to fix the users of the reduction variable
4444   // inside and outside of the scalar remainder loop.
4445
4446   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4447   // in the exit blocks. See comment on analogous loop in
4448   // fixFirstOrderRecurrence for a more complete explanation of the logic.
4449   if (!Cost->requiresScalarEpilogue(VF))
4450     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4451       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4452         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4453
4454   // Fix the scalar loop reduction variable with the incoming reduction sum
4455   // from the vector body and from the backedge value.
4456   int IncomingEdgeBlockIdx =
4457       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4458   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4459   // Pick the other block.
4460   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4461   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4462   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4463 }
4464
4465 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4466                                                   VPTransformState &State) {
4467   RecurKind RK = RdxDesc.getRecurrenceKind();
4468   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4469     return;
4470
4471   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4472   assert(LoopExitInstr && "null loop exit instruction");
4473   SmallVector<Instruction *, 8> Worklist;
4474   SmallPtrSet<Instruction *, 8> Visited;
4475   Worklist.push_back(LoopExitInstr);
4476   Visited.insert(LoopExitInstr);
4477
4478   while (!Worklist.empty()) {
4479     Instruction *Cur = Worklist.pop_back_val();
4480     if (isa<OverflowingBinaryOperator>(Cur))
4481       for (unsigned Part = 0; Part < UF; ++Part) {
4482         // FIXME: Should not rely on getVPValue at this point.
4483         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4484         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4485       }
4486
4487     for (User *U : Cur->users()) {
4488       Instruction *UI = cast<Instruction>(U);
4489       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4490           Visited.insert(UI).second)
4491         Worklist.push_back(UI);
4492     }
4493   }
4494 }
4495
4496 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4497   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4498     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4499       // Some phis were already updated by hand by the reduction and
4500       // recurrence code above; leave them alone.
4501       continue;
4502
4503     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4504     // Non-instruction incoming values will have only one value.
4505
4506     VPLane Lane = VPLane::getFirstLane();
4507     if (isa<Instruction>(IncomingValue) &&
4508         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4509                                            VF))
4510       Lane = VPLane::getLastLaneForVF(VF);
4511
4512     // Can be a loop invariant incoming value or the last scalar value to be
4513     // extracted from the vectorized loop.
4514     // FIXME: Should not rely on getVPValue at this point.
4515     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4516     Value *lastIncomingValue =
4517         OrigLoop->isLoopInvariant(IncomingValue)
4518             ? IncomingValue
4519             : State.get(State.Plan->getVPValue(IncomingValue, true),
4520                         VPIteration(UF - 1, Lane));
4521     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4522   }
4523 }
4524
4525 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4526   // The basic block and loop containing the predicated instruction.
4527   auto *PredBB = PredInst->getParent();
4528   auto *VectorLoop = LI->getLoopFor(PredBB);
4529
4530   // Initialize a worklist with the operands of the predicated instruction.
4531   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4532
4533   // Holds instructions that we need to analyze again. An instruction may be
4534   // reanalyzed if we don't yet know if we can sink it or not.
4535   SmallVector<Instruction *, 8> InstsToReanalyze;
4536
4537   // Returns true if a given use occurs in the predicated block. Phi nodes use
4538   // their operands in their corresponding predecessor blocks.
4539   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4540     auto *I = cast<Instruction>(U.getUser());
4541     BasicBlock *BB = I->getParent();
4542     if (auto *Phi = dyn_cast<PHINode>(I))
4543       BB = Phi->getIncomingBlock(
4544           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4545     return BB == PredBB;
4546   };
4547
4548   // Iteratively sink the scalarized operands of the predicated instruction
4549   // into the block we created for it. When an instruction is sunk, its
4550   // operands are then added to the worklist. The algorithm ends when a pass
4551   // through the worklist fails to sink a single instruction.
4552   bool Changed;
4553   do {
4554     // Add the instructions that need to be reanalyzed to the worklist, and
4555     // reset the changed indicator.
4556     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4557     InstsToReanalyze.clear();
4558     Changed = false;
4559
4560     while (!Worklist.empty()) {
4561       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4562
4563       // We can't sink an instruction if it is a phi node, is not in the loop,
4564       // or may have side effects.
4565       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4566           I->mayHaveSideEffects())
4567         continue;
4568
4569       // If the instruction is already in PredBB, check if we can sink its
4570       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4571       // sinking the scalar instruction I, hence it appears in PredBB; but it
4572       // may have failed to sink I's operands (recursively), which we try
4573       // (again) here.
4574       if (I->getParent() == PredBB) {
4575         Worklist.insert(I->op_begin(), I->op_end());
4576         continue;
4577       }
4578
4579       // It's legal to sink the instruction if all its uses occur in the
4580       // predicated block. Otherwise, there's nothing to do yet, and we may
4581       // need to reanalyze the instruction.
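      // For instance (illustrative), when the scalarized form of
      //   if (c[i]) b[i] = a[i] + 1;
      // leaves the scalar 'add' outside the predicated block, the add can be
      // sunk once its only user (the predicated store) lives there, and that
      // in turn may allow the scalarized load of a[i] to sink as well.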
4582       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4583         InstsToReanalyze.push_back(I);
4584         continue;
4585       }
4586
4587       // Move the instruction to the beginning of the predicated block, and
4588       // add its operands to the worklist.
4589       I->moveBefore(&*PredBB->getFirstInsertionPt());
4590       Worklist.insert(I->op_begin(), I->op_end());
4591
4592       // The sinking may have enabled other instructions to be sunk, so we
4593       // will need to iterate.
4594       Changed = true;
4595     }
4596   } while (Changed);
4597 }
4598
4599 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4600   for (PHINode *OrigPhi : OrigPHIsToFix) {
4601     VPWidenPHIRecipe *VPPhi =
4602         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4603     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4604     // Make sure the builder has a valid insert point.
4605     Builder.SetInsertPoint(NewPhi);
4606     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4607       VPValue *Inc = VPPhi->getIncomingValue(i);
4608       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4609       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4610     }
4611   }
4612 }
4613
4614 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4615   return Cost->useOrderedReductions(RdxDesc);
4616 }
4617
4618 void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, VPValue *VPDef,
4619                                    VPUser &Operands, unsigned UF,
4620                                    ElementCount VF, bool IsPtrLoopInvariant,
4621                                    SmallBitVector &IsIndexLoopInvariant,
4622                                    VPTransformState &State) {
4623   // Construct a vector GEP by widening the operands of the scalar GEP as
4624   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
4625   // results in a vector of pointers when at least one operand of the GEP
4626   // is vector-typed. Thus, to keep the representation compact, we only use
4627   // vector-typed operands for loop-varying values.
4628
4629   if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
4630     // If we are vectorizing, but the GEP has only loop-invariant operands,
4631     // the GEP we build (by only using vector-typed operands for
4632     // loop-varying values) would be a scalar pointer. Thus, to ensure we
4633     // produce a vector of pointers, we need to either arbitrarily pick an
4634     // operand to broadcast, or broadcast a clone of the original GEP.
4635     // Here, we broadcast a clone of the original.
4636     //
4637     // TODO: If at some point we decide to scalarize instructions having
4638     //       loop-invariant operands, this special case will no longer be
4639     //       required. We would add the scalarization decision to
4640     //       collectLoopScalars() and teach getVectorValue() to broadcast
4641     //       the lane-zero scalar value.
4642     auto *Clone = Builder.Insert(GEP->clone());
4643     for (unsigned Part = 0; Part < UF; ++Part) {
4644       Value *EntryPart = Builder.CreateVectorSplat(VF, Clone);
4645       State.set(VPDef, EntryPart, Part);
4646       addMetadata(EntryPart, GEP);
4647     }
4648   } else {
4649     // If the GEP has at least one loop-varying operand, we are sure to
4650     // produce a vector of pointers. But if we are only unrolling, we want
4651     // to produce a scalar GEP for each unroll part. Thus, the GEP we
4652     // produce with the code below will be scalar (if VF == 1) or vector
4653     // (otherwise). Note that for the unroll-only case, we still maintain
4654     // values in the vector mapping with initVector, as we do for other
4655     // instructions.
4656     for (unsigned Part = 0; Part < UF; ++Part) {
4657       // The pointer operand of the new GEP.
If it's loop-invariant, we 4658 // won't broadcast it. 4659 auto *Ptr = IsPtrLoopInvariant 4660 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 4661 : State.get(Operands.getOperand(0), Part); 4662 4663 // Collect all the indices for the new GEP. If any index is 4664 // loop-invariant, we won't broadcast it. 4665 SmallVector<Value *, 4> Indices; 4666 for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { 4667 VPValue *Operand = Operands.getOperand(I); 4668 if (IsIndexLoopInvariant[I - 1]) 4669 Indices.push_back(State.get(Operand, VPIteration(0, 0))); 4670 else 4671 Indices.push_back(State.get(Operand, Part)); 4672 } 4673 4674 // Create the new GEP. Note that this GEP may be a scalar if VF == 1, 4675 // but it should be a vector, otherwise. 4676 auto *NewGEP = 4677 GEP->isInBounds() 4678 ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, 4679 Indices) 4680 : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); 4681 assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && 4682 "NewGEP is not a pointer vector"); 4683 State.set(VPDef, NewGEP, Part); 4684 addMetadata(NewGEP, GEP); 4685 } 4686 } 4687 } 4688 4689 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, 4690 VPWidenPHIRecipe *PhiR, 4691 VPTransformState &State) { 4692 PHINode *P = cast<PHINode>(PN); 4693 if (EnableVPlanNativePath) { 4694 // Currently we enter here in the VPlan-native path for non-induction 4695 // PHIs where all control flow is uniform. We simply widen these PHIs. 4696 // Create a vector phi with no operands - the vector phi operands will be 4697 // set at the end of vector code generation. 4698 Type *VecTy = (State.VF.isScalar()) 4699 ? PN->getType() 4700 : VectorType::get(PN->getType(), State.VF); 4701 Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi"); 4702 State.set(PhiR, VecPhi, 0); 4703 OrigPHIsToFix.push_back(P); 4704 4705 return; 4706 } 4707 4708 assert(PN->getParent() == OrigLoop->getHeader() && 4709 "Non-header phis should have been handled elsewhere"); 4710 4711 // In order to support recurrences we need to be able to vectorize Phi nodes. 4712 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 4713 // stage #1: We create a new vector PHI node with no incoming edges. We'll use 4714 // this value when we vectorize all of the instructions that use the PHI. 4715 4716 assert(!Legal->isReductionVariable(P) && 4717 "reductions should be handled elsewhere"); 4718 4719 setDebugLocFromInst(P); 4720 4721 // This PHINode must be an induction variable. 4722 // Make sure that we know about it. 4723 assert(Legal->getInductionVars().count(P) && "Not an induction variable"); 4724 4725 InductionDescriptor II = Legal->getInductionVars().lookup(P); 4726 const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout(); 4727 4728 // FIXME: The newly created binary instructions should contain nsw/nuw flags, 4729 // which can be found from the original scalar operations. 4730 switch (II.getKind()) { 4731 case InductionDescriptor::IK_NoInduction: 4732 llvm_unreachable("Unknown induction"); 4733 case InductionDescriptor::IK_IntInduction: 4734 case InductionDescriptor::IK_FpInduction: 4735 llvm_unreachable("Integer/fp induction is handled elsewhere."); 4736 case InductionDescriptor::IK_PtrInduction: { 4737 // Handle the pointer induction variable case. 
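    // A typical source of such a phi (illustrative only) is a loop such as
    //   for (char *p = dst; p != dst + n; ++p)
    //     *p = 0;
    // where p is a pointer induction stepping by one element per iteration.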
4738 assert(P->getType()->isPointerTy() && "Unexpected type."); 4739 4740 if (Cost->isScalarAfterVectorization(P, State.VF)) { 4741 // This is the normalized GEP that starts counting at zero. 4742 Value *PtrInd = 4743 Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType()); 4744 // Determine the number of scalars we need to generate for each unroll 4745 // iteration. If the instruction is uniform, we only need to generate the 4746 // first lane. Otherwise, we generate all VF values. 4747 bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); 4748 unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); 4749 4750 bool NeedsVectorIndex = !IsUniform && VF.isScalable(); 4751 Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; 4752 if (NeedsVectorIndex) { 4753 Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); 4754 UnitStepVec = Builder.CreateStepVector(VecIVTy); 4755 PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); 4756 } 4757 4758 for (unsigned Part = 0; Part < UF; ++Part) { 4759 Value *PartStart = 4760 createStepForVF(Builder, PtrInd->getType(), VF, Part); 4761 4762 if (NeedsVectorIndex) { 4763 // Here we cache the whole vector, which means we can support the 4764 // extraction of any lane. However, in some cases the extractelement 4765 // instruction that is generated for scalar uses of this vector (e.g. 4766 // a load instruction) is not folded away. Therefore we still 4767 // calculate values for the first n lanes to avoid redundant moves 4768 // (when extracting the 0th element) and to produce scalar code (i.e. 4769 // additional add/gep instructions instead of expensive extractelement 4770 // instructions) when extracting higher-order elements. 4771 Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); 4772 Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); 4773 Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); 4774 Value *SclrGep = 4775 emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); 4776 SclrGep->setName("next.gep"); 4777 State.set(PhiR, SclrGep, Part); 4778 } 4779 4780 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 4781 Value *Idx = Builder.CreateAdd( 4782 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 4783 Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx); 4784 Value *SclrGep = 4785 emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II); 4786 SclrGep->setName("next.gep"); 4787 State.set(PhiR, SclrGep, VPIteration(Part, Lane)); 4788 } 4789 } 4790 return; 4791 } 4792 assert(isa<SCEVConstant>(II.getStep()) && 4793 "Induction step not a SCEV constant!"); 4794 Type *PhiType = II.getStep()->getType(); 4795 4796 // Build a pointer phi 4797 Value *ScalarStartValue = II.getStartValue(); 4798 Type *ScStValueType = ScalarStartValue->getType(); 4799 PHINode *NewPointerPhi = 4800 PHINode::Create(ScStValueType, 2, "pointer.phi", Induction); 4801 NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader); 4802 4803 // A pointer induction, performed by using a gep 4804 BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch(); 4805 Instruction *InductionLoc = LoopLatch->getTerminator(); 4806 const SCEV *ScalarStep = II.getStep(); 4807 SCEVExpander Exp(*PSE.getSE(), DL, "induction"); 4808 Value *ScalarStepValue = 4809 Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 4810 Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF); 4811 Value *NumUnrolledElems = 4812 Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 4813 Value *InductionGEP = 
GetElementPtrInst::Create(
4814         II.getElementType(), NewPointerPhi,
4815         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4816         InductionLoc);
4817     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4818
4819     // Create UF many actual address geps that use the pointer
4820     // phi as base and a vectorized version of the step value
4821     // (<step*0, ..., step*N>) as offset.
4822     for (unsigned Part = 0; Part < State.UF; ++Part) {
4823       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4824       Value *StartOffsetScalar =
4825           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4826       Value *StartOffset =
4827           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4828       // Create a vector of consecutive numbers from zero to VF.
4829       StartOffset =
4830           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4831
4832       Value *GEP = Builder.CreateGEP(
4833           II.getElementType(), NewPointerPhi,
4834           Builder.CreateMul(
4835               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4836               "vector.gep"));
4837       State.set(PhiR, GEP, Part);
4838     }
4839   }
4840   }
4841 }
4842
4843 /// A helper function for checking whether an integer division-related
4844 /// instruction may divide by zero (in which case it must be predicated if
4845 /// executed conditionally in the scalar code).
4846 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4847 /// Non-zero divisors that are not compile-time constants will not be
4848 /// converted into multiplication, so we will still end up scalarizing
4849 /// the division, but can do so w/o predication.
4850 static bool mayDivideByZero(Instruction &I) {
4851   assert((I.getOpcode() == Instruction::UDiv ||
4852           I.getOpcode() == Instruction::SDiv ||
4853           I.getOpcode() == Instruction::URem ||
4854           I.getOpcode() == Instruction::SRem) &&
4855          "Unexpected instruction");
4856   Value *Divisor = I.getOperand(1);
4857   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4858   return !CInt || CInt->isZero();
4859 }
4860
4861 void InnerLoopVectorizer::widenInstruction(Instruction &I, VPValue *Def,
4862                                            VPUser &User,
4863                                            VPTransformState &State) {
4864   switch (I.getOpcode()) {
4865   case Instruction::Call:
4866   case Instruction::Br:
4867   case Instruction::PHI:
4868   case Instruction::GetElementPtr:
4869   case Instruction::Select:
4870     llvm_unreachable("This instruction is handled by a different recipe.");
4871   case Instruction::UDiv:
4872   case Instruction::SDiv:
4873   case Instruction::SRem:
4874   case Instruction::URem:
4875   case Instruction::Add:
4876   case Instruction::FAdd:
4877   case Instruction::Sub:
4878   case Instruction::FSub:
4879   case Instruction::FNeg:
4880   case Instruction::Mul:
4881   case Instruction::FMul:
4882   case Instruction::FDiv:
4883   case Instruction::FRem:
4884   case Instruction::Shl:
4885   case Instruction::LShr:
4886   case Instruction::AShr:
4887   case Instruction::And:
4888   case Instruction::Or:
4889   case Instruction::Xor: {
4890     // Just widen unops and binops.
4891     setDebugLocFromInst(&I);
4892
4893     for (unsigned Part = 0; Part < UF; ++Part) {
4894       SmallVector<Value *, 2> Ops;
4895       for (VPValue *VPOp : User.operands())
4896         Ops.push_back(State.get(VPOp, Part));
4897
4898       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
4899
4900       if (auto *VecOp = dyn_cast<Instruction>(V))
4901         VecOp->copyIRFlags(&I);
4902
4903       // Use this vector value for all users of the original instruction.
4904 State.set(Def, V, Part); 4905 addMetadata(V, &I); 4906 } 4907 4908 break; 4909 } 4910 case Instruction::ICmp: 4911 case Instruction::FCmp: { 4912 // Widen compares. Generate vector compares. 4913 bool FCmp = (I.getOpcode() == Instruction::FCmp); 4914 auto *Cmp = cast<CmpInst>(&I); 4915 setDebugLocFromInst(Cmp); 4916 for (unsigned Part = 0; Part < UF; ++Part) { 4917 Value *A = State.get(User.getOperand(0), Part); 4918 Value *B = State.get(User.getOperand(1), Part); 4919 Value *C = nullptr; 4920 if (FCmp) { 4921 // Propagate fast math flags. 4922 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 4923 Builder.setFastMathFlags(Cmp->getFastMathFlags()); 4924 C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); 4925 } else { 4926 C = Builder.CreateICmp(Cmp->getPredicate(), A, B); 4927 } 4928 State.set(Def, C, Part); 4929 addMetadata(C, &I); 4930 } 4931 4932 break; 4933 } 4934 4935 case Instruction::ZExt: 4936 case Instruction::SExt: 4937 case Instruction::FPToUI: 4938 case Instruction::FPToSI: 4939 case Instruction::FPExt: 4940 case Instruction::PtrToInt: 4941 case Instruction::IntToPtr: 4942 case Instruction::SIToFP: 4943 case Instruction::UIToFP: 4944 case Instruction::Trunc: 4945 case Instruction::FPTrunc: 4946 case Instruction::BitCast: { 4947 auto *CI = cast<CastInst>(&I); 4948 setDebugLocFromInst(CI); 4949 4950 /// Vectorize casts. 4951 Type *DestTy = 4952 (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); 4953 4954 for (unsigned Part = 0; Part < UF; ++Part) { 4955 Value *A = State.get(User.getOperand(0), Part); 4956 Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); 4957 State.set(Def, Cast, Part); 4958 addMetadata(Cast, &I); 4959 } 4960 break; 4961 } 4962 default: 4963 // This instruction is not vectorized by simple widening. 4964 LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); 4965 llvm_unreachable("Unhandled instruction!"); 4966 } // end of switch. 4967 } 4968 4969 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, 4970 VPUser &ArgOperands, 4971 VPTransformState &State) { 4972 assert(!isa<DbgInfoIntrinsic>(I) && 4973 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4974 setDebugLocFromInst(&I); 4975 4976 Module *M = I.getParent()->getParent()->getParent(); 4977 auto *CI = cast<CallInst>(&I); 4978 4979 SmallVector<Type *, 4> Tys; 4980 for (Value *ArgOperand : CI->args()) 4981 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4982 4983 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 4984 4985 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4986 // version of the instruction. 4987 // Is it beneficial to perform intrinsic call compared to lib call? 4988 bool NeedToScalarize = false; 4989 InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize); 4990 InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0; 4991 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4992 assert((UseVectorIntrinsic || !NeedToScalarize) && 4993 "Instruction should be scalarized elsewhere."); 4994 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4995 "Either the intrinsic cost or vector call cost must be valid"); 4996 4997 for (unsigned Part = 0; Part < UF; ++Part) { 4998 SmallVector<Type *, 2> TysForDecl = {CI->getType()}; 4999 SmallVector<Value *, 4> Args; 5000 for (auto &I : enumerate(ArgOperands.operands())) { 5001 // Some intrinsics have a scalar argument - don't replace it with a 5002 // vector. 
5003 Value *Arg; 5004 if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index())) 5005 Arg = State.get(I.value(), Part); 5006 else { 5007 Arg = State.get(I.value(), VPIteration(0, 0)); 5008 if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index())) 5009 TysForDecl.push_back(Arg->getType()); 5010 } 5011 Args.push_back(Arg); 5012 } 5013 5014 Function *VectorF; 5015 if (UseVectorIntrinsic) { 5016 // Use vector version of the intrinsic. 5017 if (VF.isVector()) 5018 TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF); 5019 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 5020 assert(VectorF && "Can't retrieve vector intrinsic."); 5021 } else { 5022 // Use vector version of the function call. 5023 const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 5024 #ifndef NDEBUG 5025 assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr && 5026 "Can't create vector function."); 5027 #endif 5028 VectorF = VFDatabase(*CI).getVectorizedFunction(Shape); 5029 } 5030 SmallVector<OperandBundleDef, 1> OpBundles; 5031 CI->getOperandBundlesAsDefs(OpBundles); 5032 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 5033 5034 if (isa<FPMathOperator>(V)) 5035 V->copyFastMathFlags(CI); 5036 5037 State.set(Def, V, Part); 5038 addMetadata(V, &I); 5039 } 5040 } 5041 5042 void InnerLoopVectorizer::widenSelectInstruction(SelectInst &I, VPValue *VPDef, 5043 VPUser &Operands, 5044 bool InvariantCond, 5045 VPTransformState &State) { 5046 setDebugLocFromInst(&I); 5047 5048 // The condition can be loop invariant but still defined inside the 5049 // loop. This means that we can't just use the original 'cond' value. 5050 // We have to take the 'vectorized' value and pick the first lane. 5051 // Instcombine will make this a no-op. 5052 auto *InvarCond = InvariantCond 5053 ? State.get(Operands.getOperand(0), VPIteration(0, 0)) 5054 : nullptr; 5055 5056 for (unsigned Part = 0; Part < UF; ++Part) { 5057 Value *Cond = 5058 InvarCond ? InvarCond : State.get(Operands.getOperand(0), Part); 5059 Value *Op0 = State.get(Operands.getOperand(1), Part); 5060 Value *Op1 = State.get(Operands.getOperand(2), Part); 5061 Value *Sel = Builder.CreateSelect(Cond, Op0, Op1); 5062 State.set(VPDef, Sel, Part); 5063 addMetadata(Sel, &I); 5064 } 5065 } 5066 5067 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 5068 // We should not collect Scalars more than once per VF. Right now, this 5069 // function is called from collectUniformsAndScalars(), which already does 5070 // this check. Collecting Scalars for VF=1 does not make any sense. 5071 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 5072 "This function should not be visited twice for the same VF"); 5073 5074 SmallSetVector<Instruction *, 8> Worklist; 5075 5076 // These sets are used to seed the analysis with pointers used by memory 5077 // accesses that will remain scalar. 5078 SmallSetVector<Instruction *, 8> ScalarPtrs; 5079 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 5080 auto *Latch = TheLoop->getLoopLatch(); 5081 5082 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 5083 // The pointer operands of loads and stores will be scalar as long as the 5084 // memory access is not a gather or scatter operation. The value operand of a 5085 // store will remain scalar if the store is scalarized. 
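  // For example (illustrative), a consecutive store 'a[i] = x' is widened
  // using only the lane-0 address, so its pointer operand stays scalar; a
  // scatter, by contrast, needs a full vector of pointers.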
5086   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
5087     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
5088     assert(WideningDecision != CM_Unknown &&
5089            "Widening decision should be ready at this moment");
5090     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
5091       if (Ptr == Store->getValueOperand())
5092         return WideningDecision == CM_Scalarize;
5093     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
5094            "Ptr is neither a value or pointer operand");
5095     return WideningDecision != CM_GatherScatter;
5096   };
5097
5098   // A helper that returns true if the given value is a bitcast or
5099   // getelementptr instruction contained in the loop.
5100   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
5101     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
5102             isa<GetElementPtrInst>(V)) &&
5103            !TheLoop->isLoopInvariant(V);
5104   };
5105
5106   auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) {
5107     if (!isa<PHINode>(Ptr) ||
5108         !Legal->getInductionVars().count(cast<PHINode>(Ptr)))
5109       return false;
5110     auto &Induction = Legal->getInductionVars()[cast<PHINode>(Ptr)];
5111     if (Induction.getKind() != InductionDescriptor::IK_PtrInduction)
5112       return false;
5113     return isScalarUse(MemAccess, Ptr);
5114   };
5115
5116   // A helper that evaluates a memory access's use of a pointer. If the
5117   // pointer is actually the pointer induction of a loop, it is inserted
5118   // into the Worklist. If the use will be a scalar use, and the
5119   // pointer is only used by memory accesses, we place the pointer in
5120   // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs.
5121   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
5122     if (isScalarPtrInduction(MemAccess, Ptr)) {
5123       Worklist.insert(cast<Instruction>(Ptr));
5124       LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr
5125                         << "\n");
5126
5127       Instruction *Update = cast<Instruction>(
5128           cast<PHINode>(Ptr)->getIncomingValueForBlock(Latch));
5129
5130       // If there is more than one user of Update (Ptr), we shouldn't assume
5131       // it will be scalar after vectorization, as other users of the
5132       // instruction may require widening. Otherwise, add it to ScalarPtrs.
5133       if (Update->hasOneUse() && cast<Value>(*Update->user_begin()) == Ptr) {
5134         ScalarPtrs.insert(Update);
5135         return;
5136       }
5137     }
5138     // We only care about bitcast and getelementptr instructions contained in
5139     // the loop.
5140     if (!isLoopVaryingBitCastOrGEP(Ptr))
5141       return;
5142
5143     // If the pointer has already been identified as scalar (e.g., if it was
5144     // also identified as uniform), there's nothing to do.
5145     auto *I = cast<Instruction>(Ptr);
5146     if (Worklist.count(I))
5147       return;
5148
5149     // If the use of the pointer will be a scalar use, and all users of the
5150     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
5151     // place the pointer in PossibleNonScalarPtrs.
5152     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
5153           return isa<LoadInst>(U) || isa<StoreInst>(U);
5154         }))
5155       ScalarPtrs.insert(I);
5156     else
5157       PossibleNonScalarPtrs.insert(I);
5158   };
5159
5160   // We seed the scalars analysis with two classes of instructions: (1)
5161   // instructions marked uniform-after-vectorization and (2) bitcast,
5162   // getelementptr and (pointer) phi instructions used by memory accesses
5163   // requiring a scalar use.
5164   //
5165   // (1) Add to the worklist all instructions that have been identified as
5166   // uniform-after-vectorization.
5167   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
5168
5169   // (2) Add to the worklist all bitcast and getelementptr instructions used
5170   // by memory accesses requiring a scalar use. The pointer operands of loads
5171   // and stores will be scalar as long as the memory access is not a gather
5172   // or scatter operation. The value operand of a store will remain scalar if
5173   // the store is scalarized.
5174   for (auto *BB : TheLoop->blocks())
5175     for (auto &I : *BB) {
5176       if (auto *Load = dyn_cast<LoadInst>(&I)) {
5177         evaluatePtrUse(Load, Load->getPointerOperand());
5178       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
5179         evaluatePtrUse(Store, Store->getPointerOperand());
5180         evaluatePtrUse(Store, Store->getValueOperand());
5181       }
5182     }
5183   for (auto *I : ScalarPtrs)
5184     if (!PossibleNonScalarPtrs.count(I)) {
5185       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
5186       Worklist.insert(I);
5187     }
5188
5189   // Insert the forced scalars.
5190   // FIXME: Currently widenPHIInstruction() often creates a dead vector
5191   // induction variable when the PHI user is scalarized.
5192   auto ForcedScalar = ForcedScalars.find(VF);
5193   if (ForcedScalar != ForcedScalars.end())
5194     for (auto *I : ForcedScalar->second)
5195       Worklist.insert(I);
5196
5197   // Expand the worklist by looking through any bitcasts and getelementptr
5198   // instructions we've already identified as scalar. This is similar to the
5199   // expansion step in collectLoopUniforms(); however, here we're only
5200   // expanding to include additional bitcasts and getelementptr instructions.
5201   unsigned Idx = 0;
5202   while (Idx != Worklist.size()) {
5203     Instruction *Dst = Worklist[Idx++];
5204     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
5205       continue;
5206     auto *Src = cast<Instruction>(Dst->getOperand(0));
5207     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
5208           auto *J = cast<Instruction>(U);
5209           return !TheLoop->contains(J) || Worklist.count(J) ||
5210                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
5211                   isScalarUse(J, Src));
5212         })) {
5213       Worklist.insert(Src);
5214       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
5215     }
5216   }
5217
5218   // An induction variable will remain scalar if all users of the induction
5219   // variable and induction variable update remain scalar.
5220   for (auto &Induction : Legal->getInductionVars()) {
5221     auto *Ind = Induction.first;
5222     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5223
5224     // If tail-folding is applied, the primary induction variable will be used
5225     // to feed a vector compare.
5226     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
5227       continue;
5228
5229     // Determine if all users of the induction variable are scalar after
5230     // vectorization.
5231     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5232       auto *I = cast<Instruction>(U);
5233       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I);
5234     });
5235     if (!ScalarInd)
5236       continue;
5237
5238     // Determine if all users of the induction variable update instruction are
5239     // scalar after vectorization.
5240     auto ScalarIndUpdate =
5241         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5242           auto *I = cast<Instruction>(U);
5243           return I == Ind || !TheLoop->contains(I) || Worklist.count(I);
5244         });
5245     if (!ScalarIndUpdate)
5246       continue;
5247
5248     // The induction variable and its update instruction will remain scalar.
5249     Worklist.insert(Ind);
5250     Worklist.insert(IndUpdate);
5251     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
5252     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
5253                       << "\n");
5254   }
5255
5256   Scalars[VF].insert(Worklist.begin(), Worklist.end());
5257 }
5258
5259 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
5260   if (!blockNeedsPredicationForAnyReason(I->getParent()))
5261     return false;
5262   switch (I->getOpcode()) {
5263   default:
5264     break;
5265   case Instruction::Load:
5266   case Instruction::Store: {
5267     if (!Legal->isMaskRequired(I))
5268       return false;
5269     auto *Ptr = getLoadStorePointerOperand(I);
5270     auto *Ty = getLoadStoreType(I);
5271     const Align Alignment = getLoadStoreAlignment(I);
5272     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
5273                                 TTI.isLegalMaskedGather(Ty, Alignment))
5274                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
5275                                 TTI.isLegalMaskedScatter(Ty, Alignment));
5276   }
5277   case Instruction::UDiv:
5278   case Instruction::SDiv:
5279   case Instruction::SRem:
5280   case Instruction::URem:
5281     return mayDivideByZero(*I);
5282   }
5283   return false;
5284 }
5285
5286 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5287     Instruction *I, ElementCount VF) {
5288   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5289   assert(getWideningDecision(I, VF) == CM_Unknown &&
5290          "Decision should not be set yet.");
5291   auto *Group = getInterleavedAccessGroup(I);
5292   assert(Group && "Must have a group.");
5293
5294   // If the instruction's allocated size doesn't equal its type size, it
5295   // requires padding and will be scalarized.
5296   auto &DL = I->getModule()->getDataLayout();
5297   auto *ScalarTy = getLoadStoreType(I);
5298   if (hasIrregularType(ScalarTy, DL))
5299     return false;
5300
5301   // Check if masking is required.
5302   // A Group may need masking for one of two reasons: it resides in a block
5303   // that needs predication, or it was decided to use masking to deal with
5304   // gaps (either a gap at the end of a load-access that may result in a
5305   // speculative load, or any gaps in a store-access).
5306   bool PredicatedAccessRequiresMasking =
5307       blockNeedsPredicationForAnyReason(I->getParent()) &&
5308       Legal->isMaskRequired(I);
5309   bool LoadAccessWithGapsRequiresEpilogMasking =
5310       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
5311       !isScalarEpilogueAllowed();
5312   bool StoreAccessWithGapsRequiresMasking =
5313       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
5314   if (!PredicatedAccessRequiresMasking &&
5315       !LoadAccessWithGapsRequiresEpilogMasking &&
5316       !StoreAccessWithGapsRequiresMasking)
5317     return true;
5318
5319   // If masked interleaving is required, we expect that the user/target had
5320   // enabled it, because otherwise it either wouldn't have been created or
5321   // it should have been invalidated by the CostModel.
5322   assert(useMaskedInterleavedAccesses(TTI) &&
5323          "Masked interleave-groups for predicated accesses are not enabled.");
5324
5325   if (Group->isReverse())
5326     return false;
5327
5328   auto *Ty = getLoadStoreType(I);
5329   const Align Alignment = getLoadStoreAlignment(I);
5330   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5331                           : TTI.isLegalMaskedStore(Ty, Alignment);
5332 }
5333
5334 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5335     Instruction *I, ElementCount VF) {
5336   // Get and ensure we have a valid memory instruction.
5337   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
5338
5339   auto *Ptr = getLoadStorePointerOperand(I);
5340   auto *ScalarTy = getLoadStoreType(I);
5341
5342   // In order to be widened, the pointer should be consecutive, first of all.
5343   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
5344     return false;
5345
5346   // If the instruction is a store located in a predicated block, it will be
5347   // scalarized.
5348   if (isScalarWithPredication(I))
5349     return false;
5350
5351   // If the instruction's allocated size doesn't equal its type size, it
5352   // requires padding and will be scalarized.
5353   auto &DL = I->getModule()->getDataLayout();
5354   if (hasIrregularType(ScalarTy, DL))
5355     return false;
5356
5357   return true;
5358 }
5359
5360 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5361   // We should not collect Uniforms more than once per VF. Right now,
5362   // this function is called from collectUniformsAndScalars(), which
5363   // already does this check. Collecting Uniforms for VF=1 does not make any
5364   // sense.
5365
5366   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5367          "This function should not be visited twice for the same VF");
5368
5369   // Visit the list of Uniforms. If we do not find any uniform value, we will
5370   // not analyze it again; Uniforms.count(VF) will still return 1.
5371   Uniforms[VF].clear();
5372
5373   // We now know that the loop is vectorizable!
5374   // Collect instructions inside the loop that will remain uniform after
5375   // vectorization.
5376
5377   // Global values, params and instructions outside of the current loop are
5378   // out of scope.
5379   auto isOutOfScope = [&](Value *V) -> bool {
5380     Instruction *I = dyn_cast<Instruction>(V);
5381     return (!I || !TheLoop->contains(I));
5382   };
5383
5384   // Worklist containing uniform instructions demanding lane 0.
5385   SetVector<Instruction *> Worklist;
5386   BasicBlock *Latch = TheLoop->getLoopLatch();
5387
5388   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5389   // that are scalar with predication must not be considered uniform after
5390   // vectorization, because that would create an erroneous replicating region
5391   // where only a single instance out of VF should be formed.
5392   // TODO: optimize such seldom cases if found important, see PR40816.
5393   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5394     if (isOutOfScope(I)) {
5395       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5396                         << *I << "\n");
5397       return;
5398     }
5399     if (isScalarWithPredication(I)) {
5400       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5401                         << *I << "\n");
5402       return;
5403     }
5404     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5405     Worklist.insert(I);
5406   };
5407
5408   // Start with the conditional branch. If the branch condition is an
5409   // instruction contained in the loop that is only used by the branch, it is
5410   // uniform.
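  // For example (shorthand):
  //   %cmp = icmp ult i64 %iv.next, %n
  //   br i1 %cmp, label %loop, label %exit
  // Here %cmp is used only by the branch and only its lane-0 value is
  // needed, so it can be treated as uniform after vectorization.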
  auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
  if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
    addToWorklistIfAllowed(Cmp);

  auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
    InstWidening WideningDecision = getWideningDecision(I, VF);
    assert(WideningDecision != CM_Unknown &&
           "Widening decision should be ready at this moment");

    // A uniform memory op is itself uniform. We exclude uniform stores
    // here as they demand the last lane, not the first one.
    if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
      assert(WideningDecision == CM_Scalarize);
      return true;
    }

    return (WideningDecision == CM_Widen ||
            WideningDecision == CM_Widen_Reverse ||
            WideningDecision == CM_Interleave);
  };

  // Returns true if Ptr is the pointer operand of a memory access instruction
  // I, and I is known to not require scalarization.
  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
    return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
  };

  // Holds a list of values which are known to have at least one uniform use.
  // Note that there may be other uses which aren't uniform. A "uniform use"
  // here is something which only demands lane 0 of the unrolled iterations;
  // it does not imply that all lanes produce the same value (e.g. this is not
  // the usual meaning of uniform).
  SetVector<Value *> HasUniformUse;

  // Scan the loop for instructions which either a) are known to have only
  // lane 0 demanded or b) demand only lane 0 of their operand.
  for (auto *BB : TheLoop->blocks())
    for (auto &I : *BB) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::sideeffect:
        case Intrinsic::experimental_noalias_scope_decl:
        case Intrinsic::assume:
        case Intrinsic::lifetime_start:
        case Intrinsic::lifetime_end:
          if (TheLoop->hasLoopInvariantOperands(&I))
            addToWorklistIfAllowed(&I);
          break;
        default:
          break;
        }
      }

      // ExtractValue instructions must be uniform, because the operands are
      // known to be loop-invariant.
      if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
        assert(isOutOfScope(EVI->getAggregateOperand()) &&
               "Expected aggregate value to be loop invariant");
        addToWorklistIfAllowed(EVI);
        continue;
      }

      // If there's no pointer operand, there's nothing to do.
      auto *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // A uniform memory op is itself uniform. We exclude uniform stores
      // here as they demand the last lane, not the first one.
      if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
        addToWorklistIfAllowed(&I);

      if (isUniformDecision(&I, VF)) {
        assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
        HasUniformUse.insert(Ptr);
      }
    }

  // Add to the worklist any operands which have *only* uniform (e.g. lane 0
  // demanding) users. Since loops are assumed to be in LCSSA form, this
  // disallows uses outside the loop as well.
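  // Illustrative sketch (hypothetical IR): for a consecutive access such as
  //   %gep = getelementptr inbounds i32, i32* %a, i64 %iv
  //   %v   = load i32, i32* %gep
  // the load is widened, so %gep has a "uniform use": only its lane-0 value
  // is needed to form the wide pointer. If %gep has no other users, it is
  // added to the worklist below.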
  for (auto *V : HasUniformUse) {
    if (isOutOfScope(V))
      continue;
    auto *I = cast<Instruction>(V);
    auto UsersAreMemAccesses =
        llvm::all_of(I->users(), [&](User *U) -> bool {
          return isVectorizedMemAccessUse(cast<Instruction>(U), V);
        });
    if (UsersAreMemAccesses)
      addToWorklistIfAllowed(I);
  }

  // Expand Worklist in topological order: whenever a new instruction
  // is added, its users should already be inside Worklist. This ensures
  // a uniform instruction will only be used by uniform instructions.
  unsigned idx = 0;
  while (idx != Worklist.size()) {
    Instruction *I = Worklist[idx++];

    for (auto OV : I->operand_values()) {
      // isOutOfScope operands cannot be uniform instructions.
      if (isOutOfScope(OV))
        continue;
      // First-order recurrence phis should typically be considered
      // non-uniform.
      auto *OP = dyn_cast<PHINode>(OV);
      if (OP && Legal->isFirstOrderRecurrence(OP))
        continue;
      // If all the users of the operand are uniform, then add the
      // operand into the uniform worklist.
      auto *OI = cast<Instruction>(OV);
      if (llvm::all_of(OI->users(), [&](User *U) -> bool {
            auto *J = cast<Instruction>(U);
            return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
          }))
        addToWorklistIfAllowed(OI);
    }
  }

  // For an instruction to be added into Worklist above, all its users inside
  // the loop should also be in Worklist. However, this condition cannot be
  // true for phi nodes that form a cyclic dependence. We must process phi
  // nodes separately. An induction variable will remain uniform if all users
  // of the induction variable and induction variable update remain uniform.
  // The code below handles both pointer and non-pointer induction variables.
  for (auto &Induction : Legal->getInductionVars()) {
    auto *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // Determine if all users of the induction variable are uniform after
    // vectorization.
    auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
      auto *I = cast<Instruction>(U);
      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
             isVectorizedMemAccessUse(I, Ind);
    });
    if (!UniformInd)
      continue;

    // Determine if all users of the induction variable update instruction are
    // uniform after vectorization.
    auto UniformIndUpdate =
        llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          auto *I = cast<Instruction>(U);
          return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
                 isVectorizedMemAccessUse(I, IndUpdate);
        });
    if (!UniformIndUpdate)
      continue;

    // The induction variable and its update instruction will remain uniform.
    addToWorklistIfAllowed(Ind);
    addToWorklistIfAllowed(IndUpdate);
  }

  Uniforms[VF].insert(Worklist.begin(), Worklist.end());
}

bool LoopVectorizationCostModel::runtimeChecksRequired() {
  LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
  if (Legal->getRuntimePointerChecking()->Need) {
    reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
        "runtime pointer checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  if (!PSE.getUnionPredicate().getPredicates().empty()) {
    reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
        "runtime SCEV checks needed. Enable vectorization of this "
        "loop with '#pragma clang loop vectorize(enable)' when "
        "compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  // FIXME: Avoid specializing for stride==1 instead of bailing out.
  if (!Legal->getLAI()->getSymbolicStrides().empty()) {
    reportVectorizationFailure("Runtime stride check for small trip count",
        "runtime stride == 1 checks needed. Enable vectorization of "
        "this loop without such check by compiling with -Os/-Oz",
        "CantVersionLoopWithOptForSize", ORE, TheLoop);
    return true;
  }

  return false;
}

ElementCount
LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
  if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
    return ElementCount::getScalable(0);

  if (Hints->isScalableVectorizationDisabled()) {
    reportVectorizationInfo("Scalable vectorization is explicitly disabled",
                            "ScalableVectorizationDisabled", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");

  auto MaxScalableVF = ElementCount::getScalable(
      std::numeric_limits<ElementCount::ScalarTy>::max());

  // Test that the loop-vectorizer can legalize all operations for this MaxVF.
  // FIXME: While for scalable vectors this is currently sufficient, this
  // should be replaced by a more detailed mechanism that filters out specific
  // VFs, instead of invalidating vectorization for a whole set of VFs based
  // on the MaxVF.

  // Disable scalable vectorization if the loop contains unsupported
  // reductions.
  if (!canVectorizeReductions(MaxScalableVF)) {
    reportVectorizationInfo(
        "Scalable vectorization not supported for the reduction "
        "operations found in this loop.",
        "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  // Disable scalable vectorization if the loop contains any instructions
  // with element types not supported for scalable vectors.
  if (any_of(ElementTypesInLoop, [&](Type *Ty) {
        return !Ty->isVoidTy() &&
               !this->TTI.isElementTypeLegalForScalableVector(Ty);
      })) {
    reportVectorizationInfo("Scalable vectorization is not supported "
                            "for all element types found in this loop.",
                            "ScalableVFUnfeasible", ORE, TheLoop);
    return ElementCount::getScalable(0);
  }

  if (Legal->isSafeForAnyVectorWidth())
    return MaxScalableVF;

  // Limit MaxScalableVF by the maximum safe dependence distance.
  Optional<unsigned> MaxVScale = TTI.getMaxVScale();
  if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
    unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange)
                             .getVScaleRangeArgs()
                             .second;
    if (VScaleMax > 0)
      MaxVScale = VScaleMax;
  }
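  // Worked example (assumed numbers): with MaxSafeElements = 32 and a target
  // reporting a maximum vscale of 4, the largest safe scalable VF computed
  // below is getScalable(32 / 4), i.e. <vscale x 8> elements; if no maximum
  // vscale is known, the scalable VF is conservatively set to 0.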
  MaxScalableVF = ElementCount::getScalable(
      MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
  if (!MaxScalableVF)
    reportVectorizationInfo(
        "Max legal vector width too small, scalable vectorization "
        "unfeasible.",
        "ScalableVFUnfeasible", ORE, TheLoop);

  return MaxScalableVF;
}

FixedScalableVFPair
LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
                                                 ElementCount UserVF) {
  MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
  unsigned SmallestType, WidestType;
  std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();

  // Get the maximum safe dependence distance in bits computed by LAA.
  // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
  // the memory access that is most restrictive (involved in the smallest
  // dependence distance).
  unsigned MaxSafeElements =
      PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);

  auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
  auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);

  LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
                    << ".\n");
  LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
                    << ".\n");

  // First analyze the UserVF, fall back if the UserVF should be ignored.
  if (UserVF) {
    auto MaxSafeUserVF =
        UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;

    if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
      // If `VF=vscale x N` is safe, then so is `VF=N`.
      if (UserVF.isScalable())
        return FixedScalableVFPair(
            ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
      else
        return UserVF;
    }

    assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));

    // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
    // is better to ignore the hint and let the compiler choose a suitable VF.
    if (!UserVF.isScalable()) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe, clamping to max safe VF="
                        << MaxSafeFixedVF << ".\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe, clamping to maximum safe vectorization factor "
               << ore::NV("VectorizationFactor", MaxSafeFixedVF);
      });
      return MaxSafeFixedVF;
    }

    if (!TTI.supportsScalableVectors() &&
        !ForceTargetSupportsScalableVectors) {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is ignored because scalable vectors are not "
                           "available.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is ignored because the target does not support scalable "
                  "vectors. The compiler will pick a more suitable value.";
      });
    } else {
      LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
                        << " is unsafe. Ignoring scalable UserVF.\n");
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
                                          TheLoop->getStartLoc(),
                                          TheLoop->getHeader())
               << "User-specified vectorization factor "
               << ore::NV("UserVectorizationFactor", UserVF)
               << " is unsafe. Ignoring the hint to let the compiler pick a "
                  "more suitable value.";
      });
    }
  }

  LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
                    << " / " << WidestType << " bits.\n");

  FixedScalableVFPair Result(ElementCount::getFixed(1),
                             ElementCount::getScalable(0));
  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeFixedVF))
    Result.FixedVF = MaxVF;

  if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
                                           WidestType, MaxSafeScalableVF))
    if (MaxVF.isScalable()) {
      Result.ScalableVF = MaxVF;
      LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
                        << "\n");
    }

  return Result;
}

FixedScalableVFPair
LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
  if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
    // TODO: It may be useful to do this, since it's still likely to be
    // dynamically uniform if the target can skip.
    reportVectorizationFailure(
        "Not inserting runtime ptr check for divergent target",
        "runtime pointer checks needed. Not enabled for divergent target",
        "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
  LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
  if (TC == 1) {
    reportVectorizationFailure("Single iteration (non) loop",
        "loop trip count is one, irrelevant for vectorization",
        "SingleIterationLoop", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  switch (ScalarEpilogueStatus) {
  case CM_ScalarEpilogueAllowed:
    return computeFeasibleMaxVF(TC, UserVF);
  case CM_ScalarEpilogueNotAllowedUsePredicate:
    LLVM_FALLTHROUGH;
  case CM_ScalarEpilogueNotNeededUsePredicate:
    LLVM_DEBUG(
        dbgs() << "LV: vector predicate hint/switch found.\n"
               << "LV: Not allowing scalar epilogue, creating predicated "
               << "vector loop.\n");
    break;
  case CM_ScalarEpilogueNotAllowedLowTripLoop:
    // fallthrough as a special case of OptForSize
  case CM_ScalarEpilogueNotAllowedOptSize:
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
      LLVM_DEBUG(
          dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
    else
      LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
                        << "count.\n");

    // Bail if runtime checks are required, which are not good when optimising
    // for size.
    if (runtimeChecksRequired())
      return FixedScalableVFPair::getNone();

    break;
  }

  // The only loops we can vectorize without a scalar epilogue are loops with
  // a bottom-test and a single exiting block. We'd have to handle the fact
  // that not every instruction executes on the last iteration. This will
  // require a lane mask which varies through the vector loop body. (TODO)
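  // Illustrative sketch (hypothetical source): a counted bottom-tested loop
  //   for (i = 0; i < n; ++i) { ... }
  // has its latch as the single exiting block and passes the check below,
  // whereas a loop with an extra early exit such as
  //   for (i = 0; i < n; ++i) { if (a[i] == key) break; ... }
  // exits from a non-latch block and cannot be vectorized without a scalar
  // epilogue, so it is rejected here.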
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    // If there was a tail-folding hint/switch, but we can't fold the tail by
    // masking, fall back to vectorization with a scalar epilogue.
    if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
      LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                           "scalar epilogue instead.\n");
      ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
      return computeFeasibleMaxVF(TC, UserVF);
    }
    return FixedScalableVFPair::getNone();
  }

  // Now try tail folding.

  // Invalidate interleave groups that require an epilogue if we can't mask
  // the interleave-group.
  if (!useMaskedInterleavedAccesses(TTI)) {
    assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
           "No decisions should have been taken at this point");
    // Note: There is no need to invalidate any cost modeling decisions here,
    // as none were taken so far.
    InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
  }

  FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
  // Avoid tail folding if the trip count is known to be a multiple of any VF
  // we chose.
  // FIXME: The condition below pessimises the case for fixed-width vectors,
  // when scalable VFs are also candidates for vectorization.
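  // Worked example (assumed numbers): with a known trip count TC = 64,
  // MaxFixedVF = 8 and UserIC = 2, MaxVFtimesIC = 16 and
  // (BackedgeTakenCount + 1) urem 16 == 64 % 16 == 0, so no tail remains and
  // tail folding is unnecessary for any VF/IC combination the cost model may
  // pick.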
  if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
    ElementCount MaxFixedVF = MaxFactors.FixedVF;
    assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
           "MaxFixedVF must be a power of 2");
    unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
                                   : MaxFixedVF.getFixedValue();
    ScalarEvolution *SE = PSE.getSE();
    const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
    const SCEV *ExitCount = SE->getAddExpr(
        BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
    const SCEV *Rem = SE->getURemExpr(
        SE->applyLoopGuards(ExitCount, TheLoop),
        SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
    if (Rem->isZero()) {
      // Accept MaxFixedVF if we do not have a tail.
      LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
      return MaxFactors;
    }
  }

  // For scalable vectors, don't use tail folding as this is currently not yet
  // supported. The code is likely to have ended up here if the tripcount is
  // low, in which case it makes sense not to use scalable vectors.
  if (MaxFactors.ScalableVF.isVector())
    MaxFactors.ScalableVF = ElementCount::getScalable(0);

  // If we don't know the precise trip count, or if the trip count that we
  // found modulo the vectorization factor is not zero, try to fold the tail
  // by masking.
  // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
  if (Legal->prepareToFoldTailByMasking()) {
    FoldTailByMasking = true;
    return MaxFactors;
  }

  // If there was a tail-folding hint/switch, but we can't fold the tail by
  // masking, fall back to vectorization with a scalar epilogue.
  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
                         "scalar epilogue instead.\n");
    ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
    return MaxFactors;
  }

  if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
    LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
    return FixedScalableVFPair::getNone();
  }

  if (TC == 0) {
    reportVectorizationFailure(
        "Unable to calculate the loop count due to complex control flow",
        "unable to calculate the loop count due to complex control flow",
        "UnknownLoopCountComplexCFG", ORE, TheLoop);
    return FixedScalableVFPair::getNone();
  }

  reportVectorizationFailure(
      "Cannot optimize for size and vectorize at the same time.",
      "cannot optimize for size and vectorize at the same time. "
      "Enable vectorization of this loop with '#pragma clang loop "
      "vectorize(enable)' when compiling with -Os/-Oz",
      "NoTailLoopWithOptForSize", ORE, TheLoop);
  return FixedScalableVFPair::getNone();
}

ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
    unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
    const ElementCount &MaxSafeVF) {
  bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
  TypeSize WidestRegister = TTI.getRegisterBitWidth(
      ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
                           : TargetTransformInfo::RGK_FixedWidthVector);

  // Convenience function to return the minimum of two ElementCounts.
  auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
    assert((LHS.isScalable() == RHS.isScalable()) &&
           "Scalable flags must match");
    return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
  };

  // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
  // Note that both WidestRegister and WidestType may not be powers of 2.
  auto MaxVectorElementCount = ElementCount::get(
      PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
      ComputeScalableMaxVF);
  MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
  LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
                    << (MaxVectorElementCount * WidestType) << " bits.\n");

  if (!MaxVectorElementCount) {
    LLVM_DEBUG(dbgs() << "LV: The target has no "
                      << (ComputeScalableMaxVF ? "scalable" : "fixed")
                      << " vector registers.\n");
    return ElementCount::getFixed(1);
  }

  const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
  if (ConstTripCount &&
      ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
      isPowerOf2_32(ConstTripCount)) {
    // We need to clamp the VF to be the ConstTripCount. There is no point in
    // choosing a higher viable VF as done in the loop below. If
    // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
    // the TC is less than or equal to the known number of lanes.
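    // Worked example (assumed numbers): with a 256-bit widest register and
    // WidestType = 32 bits, MaxVectorElementCount is 8; a constant trip
    // count of 8 (a power of 2, and <= 8) then clamps the VF to exactly 8,
    // leaving no scalar remainder iterations.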
    LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
                      << ConstTripCount << "\n");
    return TripCountEC;
  }

  ElementCount MaxVF = MaxVectorElementCount;
  if (TTI.shouldMaximizeVectorBandwidth() ||
      (MaximizeBandwidth && isScalarEpilogueAllowed())) {
    auto MaxVectorElementCountMaxBW = ElementCount::get(
        PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
        ComputeScalableMaxVF);
    MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);

    // Collect all viable vectorization factors larger than the default MaxVF
    // (i.e. MaxVectorElementCount).
    SmallVector<ElementCount, 8> VFs;
    for (ElementCount VS = MaxVectorElementCount * 2;
         ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
      VFs.push_back(VS);

    // For each VF calculate its register usage.
    auto RUs = calculateRegisterUsage(VFs);

    // Select the largest VF which doesn't require more registers than
    // existing ones.
    for (int i = RUs.size() - 1; i >= 0; --i) {
      bool Selected = true;
      for (auto &pair : RUs[i].MaxLocalUsers) {
        unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
        if (pair.second > TargetNumRegisters)
          Selected = false;
      }
      if (Selected) {
        MaxVF = VFs[i];
        break;
      }
    }
    if (ElementCount MinVF =
            TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
      if (ElementCount::isKnownLT(MaxVF, MinVF)) {
        LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
                          << ") with target's minimum: " << MinVF << '\n');
        MaxVF = MinVF;
      }
    }
  }
  return MaxVF;
}

bool LoopVectorizationCostModel::isMoreProfitable(
    const VectorizationFactor &A, const VectorizationFactor &B) const {
  InstructionCost CostA = A.Cost;
  InstructionCost CostB = B.Cost;

  unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);

  if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
      MaxTripCount) {
    // If we are folding the tail and the trip count is a known (possibly
    // small) constant, the trip count will be rounded up to an integer number
    // of iterations. The total cost will be
    // PerIterationCost * ceil(TripCount / VF), which we compare directly.
    // When not folding the tail, the total cost will be
    // PerIterationCost * floor(TC / VF) + scalar remainder cost, and so is
    // approximated with the per-lane cost below instead of using the
    // tripcount as here.
    auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
    auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
    return RTCostA < RTCostB;
  }

  // Improve estimate for the vector width if it is scalable.
  unsigned EstimatedWidthA = A.Width.getKnownMinValue();
  unsigned EstimatedWidthB = B.Width.getKnownMinValue();
  if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
    if (A.Width.isScalable())
      EstimatedWidthA *= VScale.getValue();
    if (B.Width.isScalable())
      EstimatedWidthB *= VScale.getValue();
  }
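  // Worked example (assumed numbers) of the per-lane comparison performed
  // below: A = {VF 4, cost 10} and B = {VF 8, cost 16} give
  // 10 * 8 = 80 versus 16 * 4 = 64, so B's 2.0 cost-per-lane beats A's 2.5
  // and isMoreProfitable(A, B) is false.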
  // When set to preferred, for now assume vscale may be larger than 1 (or the
  // one being tuned for), so that scalable vectorization is slightly
  // favorable over fixed-width vectorization.
  if (Hints->isScalableVectorizationPreferred())
    if (A.Width.isScalable() && !B.Width.isScalable())
      return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);

  // To avoid the need for FP division:
  //   (CostA / A.Width) < (CostB / B.Width)
  //   <=> (CostA * B.Width) < (CostB * A.Width)
  return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
}

VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
    const ElementCountSet &VFCandidates) {
  InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
  LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
  assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
  assert(VFCandidates.count(ElementCount::getFixed(1)) &&
         "Expected Scalar VF to be a candidate");

  const VectorizationFactor ScalarCost(ElementCount::getFixed(1),
                                       ExpectedCost);
  VectorizationFactor ChosenFactor = ScalarCost;

  bool ForceVectorization =
      Hints->getForce() == LoopVectorizeHints::FK_Enabled;
  if (ForceVectorization && VFCandidates.size() > 1) {
    // Ignore scalar width, because the user explicitly wants vectorization.
    // Initialize cost to max so that VF = 2 is, at least, chosen during cost
    // evaluation.
    ChosenFactor.Cost = InstructionCost::getMax();
  }

  SmallVector<InstructionVFPair> InvalidCosts;
  for (const auto &i : VFCandidates) {
    // The cost for scalar VF=1 is already calculated, so ignore it.
    if (i.isScalar())
      continue;

    VectorizationCostTy C = expectedCost(i, &InvalidCosts);
    VectorizationFactor Candidate(i, C.first);

#ifndef NDEBUG
    unsigned AssumedMinimumVscale = 1;
    if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
      AssumedMinimumVscale = VScale.getValue();
    unsigned Width =
        Candidate.Width.isScalable()
            ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
            : Candidate.Width.getFixedValue();
    LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
                      << " costs: " << (Candidate.Cost / Width));
    if (i.isScalable())
      LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
                        << AssumedMinimumVscale << ")");
    LLVM_DEBUG(dbgs() << ".\n");
#endif

    if (!C.second && !ForceVectorization) {
      LLVM_DEBUG(
          dbgs() << "LV: Not considering vector loop of width " << i
                 << " because it will not generate any vector instructions.\n");
      continue;
    }

    // If profitable, add it to the ProfitableVFs list.
    if (isMoreProfitable(Candidate, ScalarCost))
      ProfitableVFs.push_back(Candidate);

    if (isMoreProfitable(Candidate, ChosenFactor))
      ChosenFactor = Candidate;
  }

  // Emit a report of VFs with invalid costs in the loop.
  if (!InvalidCosts.empty()) {
    // Group the remarks per instruction, keeping the instruction order from
    // InvalidCosts.
    std::map<Instruction *, unsigned> Numbering;
    unsigned I = 0;
    for (auto &Pair : InvalidCosts)
      if (!Numbering.count(Pair.first))
        Numbering[Pair.first] = I++;

    // Sort the list, first on instruction(number) then on VF.
    llvm::sort(InvalidCosts,
               [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
                 if (Numbering[A.first] != Numbering[B.first])
                   return Numbering[A.first] < Numbering[B.first];
                 ElementCountComparator ECC;
                 return ECC(A.second, B.second);
               });

    // For a list of ordered instruction-vf pairs:
    //   [(load, vf1), (load, vf2), (store, vf1)]
    // Group the instructions together to emit separate remarks for:
    //   load  (vf1, vf2)
    //   store (vf1)
    auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
    auto Subset = ArrayRef<InstructionVFPair>();
    do {
      if (Subset.empty())
        Subset = Tail.take_front(1);

      Instruction *I = Subset.front().first;

      // If the next instruction is different, or if there are no other pairs,
      // emit a remark for the collated subset. e.g.
      //   [(load, vf1), (load, vf2)]
      // to emit:
      //   remark: invalid costs for 'load' at VF=(vf1, vf2)
      if (Subset == Tail || Tail[Subset.size()].first != I) {
        std::string OutString;
        raw_string_ostream OS(OutString);
        assert(!Subset.empty() && "Unexpected empty range");
        OS << "Instruction with invalid costs prevented vectorization at VF=(";
        for (auto &Pair : Subset)
          OS << (Pair.second == Subset.front().second ? "" : ", ")
             << Pair.second;
        OS << "):";
        if (auto *CI = dyn_cast<CallInst>(I))
          OS << " call to " << CI->getCalledFunction()->getName();
        else
          OS << " " << I->getOpcodeName();
        OS.flush();
        reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
        Tail = Tail.drop_front(Subset.size());
        Subset = {};
      } else
        // Grow the subset by one element.
        Subset = Tail.take_front(Subset.size() + 1);
    } while (!Tail.empty());
  }

  if (!EnableCondStoresVectorization && NumPredStores) {
    reportVectorizationFailure("There are conditional stores.",
        "store that is conditionally executed prevents vectorization",
        "ConditionalStore", ORE, TheLoop);
    ChosenFactor = ScalarCost;
  }

  LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
                 ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
             << "LV: Vectorization seems to be not beneficial, "
             << "but was forced by a user.\n");
  LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
  return ChosenFactor;
}

bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
    const Loop &L, ElementCount VF) const {
  // Cross-iteration phis such as reductions need special handling and are
  // currently unsupported.
  if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
        return Legal->isFirstOrderRecurrence(&Phi) ||
               Legal->isReductionVariable(&Phi);
      }))
    return false;

  // Phis with uses outside of the loop require special handling and are
  // currently unsupported.
  for (auto &Entry : Legal->getInductionVars()) {
    // Look for uses of the value of the induction at the last iteration.
    Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
    for (User *U : PostInc->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
    // Look for uses of the penultimate value of the induction.
    for (User *U : Entry.first->users())
      if (!L.contains(cast<Instruction>(U)))
        return false;
  }

  // Induction variables that are widened require special handling that is
  // currently not supported.
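  // Illustrative sketch (hypothetical source): in
  //   for (i = 0; i < n; ++i) a[i] = i;
  // the induction value itself is stored, so the IV is widened into a vector
  // (it is neither scalar-after-vectorization nor profitable to scalarize)
  // and the check below rejects the loop for epilogue vectorization.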
  if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
        return !(this->isScalarAfterVectorization(Entry.first, VF) ||
                 this->isProfitableToScalarize(Entry.first, VF));
      }))
    return false;

  // Epilogue vectorization code has not been audited to ensure it handles
  // non-latch exits properly. It may be fine, but it needs to be audited and
  // tested.
  if (L.getExitingBlock() != L.getLoopLatch())
    return false;

  return true;
}

bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
    const ElementCount VF) const {
  // FIXME: We need a much better cost-model to take different parameters such
  // as register pressure, code size increase and cost of extra branches into
  // account. For now we apply a very crude heuristic and only consider loops
  // with vectorization factors larger than a certain value.
  // We also consider epilogue vectorization unprofitable for targets that
  // don't consider interleaving beneficial (e.g. MVE).
  if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
    return false;
  if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
    return true;
  return false;
}

VectorizationFactor
LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
    const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
  VectorizationFactor Result = VectorizationFactor::Disabled();
  if (!EnableEpilogueVectorization) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
    return Result;
  }

  if (!isScalarEpilogueAllowed()) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
                  "allowed.\n";);
    return Result;
  }

  // Not really a cost consideration, but check for unsupported cases here to
  // simplify the logic.
  if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
    LLVM_DEBUG(
        dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
                  "not a supported candidate.\n";);
    return Result;
  }

  if (EpilogueVectorizationForceVF > 1) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
    ElementCount ForcedEC =
        ElementCount::getFixed(EpilogueVectorizationForceVF);
    if (LVP.hasPlanWithVF(ForcedEC))
      return {ForcedEC, 0};
    else {
      LLVM_DEBUG(
          dbgs()
          << "LEV: Epilogue vectorization forced factor is not viable.\n";);
      return Result;
    }
  }

  if (TheLoop->getHeader()->getParent()->hasOptSize() ||
      TheLoop->getHeader()->getParent()->hasMinSize()) {
    LLVM_DEBUG(
        dbgs()
        << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
    return Result;
  }
  auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
  if (MainLoopVF.isScalable())
    LLVM_DEBUG(
        dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
                  "yet supported. Converting to fixed-width (VF="
               << FixedMainLoopVF << ") instead\n");

  if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
    LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
                         "this loop\n");
    return Result;
  }

  for (auto &NextVF : ProfitableVFs)
    if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
        (Result.Width.getFixedValue() == 1 ||
         isMoreProfitable(NextVF, Result)) &&
        LVP.hasPlanWithVF(NextVF.Width))
      Result = NextVF;

  if (Result != VectorizationFactor::Disabled())
    LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
                      << Result.Width.getFixedValue() << "\n";);
  return Result;
}

std::pair<unsigned, unsigned>
LoopVectorizationCostModel::getSmallestAndWidestTypes() {
  unsigned MinWidth = -1U;
  unsigned MaxWidth = 8;
  const DataLayout &DL = TheFunction->getParent()->getDataLayout();
  for (Type *T : ElementTypesInLoop) {
    MinWidth = std::min<unsigned>(
        MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
    MaxWidth = std::max<unsigned>(
        MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
  }
  return {MinWidth, MaxWidth};
}

void LoopVectorizationCostModel::collectElementTypesForWidening() {
  ElementTypesInLoop.clear();
  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Type *T = I.getType();

      // Skip ignored values.
      if (ValuesToIgnore.count(&I))
        continue;

      // Only examine Loads, Stores and PHINodes.
      if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
        continue;

      // Examine PHI nodes that are reduction variables. Update the type to
      // account for the recurrence type.
      if (auto *PN = dyn_cast<PHINode>(&I)) {
        if (!Legal->isReductionVariable(PN))
          continue;
        const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
        if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
            TTI.preferInLoopReduction(RdxDesc.getOpcode(),
                                      RdxDesc.getRecurrenceType(),
                                      TargetTransformInfo::ReductionFlags()))
          continue;
        T = RdxDesc.getRecurrenceType();
      }

      // Examine the stored values.
      if (auto *ST = dyn_cast<StoreInst>(&I))
        T = ST->getValueOperand()->getType();

      // Ignore loaded pointer types and stored pointer types that are not
      // vectorizable.
      //
      // FIXME: The check here attempts to predict whether a load or store
      //        will be vectorized. We only know this for certain after a VF
      //        has been selected. Here, we assume that if an access can be
      //        vectorized, it will be. We should also look at extending this
      //        optimization to non-pointer types.
      //
      if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
          !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
        continue;

      ElementTypesInLoop.insert(T);
    }
  }
}

unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
                                                           unsigned LoopCost) {
  // -- The interleave heuristics --
  // We interleave the loop in order to expose ILP and reduce the loop
  // overhead. There are many micro-architectural considerations that we
  // can't predict
  // at this level. For example, frontend pressure (on decode or fetch) due to
  // code size, or the number and capabilities of the execution ports.
  //
  // We use the following heuristics to select the interleave count:
  // 1. If the code has reductions, then we interleave to break the cross
  //    iteration dependency.
  // 2. If the loop is really small, then we interleave to reduce the loop
  //    overhead.
  // 3. We don't interleave if we think that we will spill registers to memory
  //    due to the increased register pressure.

  if (!isScalarEpilogueAllowed())
    return 1;

  // If there is a maximum safe dependence distance, it was already used to
  // bound the VF; interleaving would widen the effective width further, so
  // don't interleave.
  if (Legal->getMaxSafeDepDistBytes() != -1U)
    return 1;

  auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
  const bool HasReductions = !Legal->getReductionVars().empty();
  // Do not interleave loops with a relatively small known or estimated trip
  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
  // enabled, and the code has scalar reductions (HasReductions && VF == 1),
  // because with the above conditions interleaving can expose ILP and break
  // cross iteration dependences for reductions.
  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
    return 1;

  RegisterUsage R = calculateRegisterUsage({VF})[0];
  // We divide by these constants so assume that we have at least one
  // instruction that uses at least one register.
  for (auto &pair : R.MaxLocalUsers) {
    pair.second = std::max(pair.second, 1U);
  }

  // We calculate the interleave count using the following formula.
  // Subtract the number of loop invariants from the number of available
  // registers. These registers are used by all of the interleaved instances.
  // Next, divide the remaining registers by the number of registers that is
  // required by the loop, in order to estimate how many parallel instances
  // fit without causing spills. All of this is rounded down if necessary to
  // be a power of two. We want power of two interleave count to simplify any
  // addressing operations or alignment considerations.
  // We also want power of two interleave counts to ensure that the induction
  // variable of the vector loop wraps to zero, when tail is folded by
  // masking; this currently happens when OptForSize, in which case IC is set
  // to 1 above.
  unsigned IC = UINT_MAX;

  for (auto &pair : R.MaxLocalUsers) {
    unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
    LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
                      << " registers of "
                      << TTI.getRegisterClassName(pair.first)
                      << " register class\n");
    if (VF.isScalar()) {
      if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumScalarRegs;
    } else {
      if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
        TargetNumRegisters = ForceTargetNumVectorRegs;
    }
    unsigned MaxLocalUsers = pair.second;
    unsigned LoopInvariantRegs = 0;
    if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
      LoopInvariantRegs = R.LoopInvariantRegs[pair.first];

    unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) /
                                   MaxLocalUsers);
    // Don't count the induction variable as interleaved.
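    // Worked example (assumed numbers): with TargetNumRegisters = 32,
    // LoopInvariantRegs = 2 and MaxLocalUsers = 6, the estimate above is
    // PowerOf2Floor((32 - 2) / 6) = PowerOf2Floor(5) = 4 interleaved copies;
    // the variant below additionally discounts one register for the
    // induction variable, which is shared by all copies.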
    if (EnableIndVarRegisterHeur) {
      TmpIC =
          PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
                        std::max(1U, (MaxLocalUsers - 1)));
    }

    IC = std::min(IC, TmpIC);
  }

  // Clamp the interleave ranges to reasonable counts.
  unsigned MaxInterleaveCount =
      TTI.getMaxInterleaveFactor(VF.getKnownMinValue());

  // Check if the user has overridden the max.
  if (VF.isScalar()) {
    if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
  } else {
    if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
      MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
  }

  // If the trip count is a known or estimated compile-time constant, limit
  // the interleave count to be less than the trip count divided by VF,
  // provided it is at least 1.
  //
  // For scalable vectors we can't know if interleaving is beneficial. It may
  // not be beneficial for small loops if none of the lanes in the second
  // vector iteration is enabled. However, for larger loops, there is likely
  // to be a similar benefit as for fixed-width vectors. For now, we choose to
  // leave the InterleaveCount as if vscale is '1', although if some
  // information about the vector is known (e.g. min vector size), we can make
  // a better decision.
  if (BestKnownTC) {
    MaxInterleaveCount =
        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
    // Make sure MaxInterleaveCount is greater than 0.
    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
  }

  assert(MaxInterleaveCount > 0 &&
         "Maximum interleave count must be greater than 0");

  // Clamp the calculated IC to be between 1 and the max interleave count
  // that the target and trip count allow.
  if (IC > MaxInterleaveCount)
    IC = MaxInterleaveCount;
  else
    // Make sure IC is greater than 0.
    IC = std::max(1u, IC);

  assert(IC > 0 && "Interleave count must be greater than 0.");

  // If we did not calculate the cost for VF (because the user selected the
  // VF) then we calculate the cost of VF here.
  if (LoopCost == 0) {
    InstructionCost C = expectedCost(VF).first;
    assert(C.isValid() && "Expected to have chosen a VF with valid cost");
    LoopCost = *C.getValue();
  }

  assert(LoopCost && "Non-zero loop cost expected");

  // Interleave if we vectorized this loop and there is a reduction that could
  // benefit from interleaving.
  if (VF.isVector() && HasReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
    return IC;
  }

  // Note that if we've already vectorized the loop we will have done the
  // runtime check and so interleaving won't require further checks.
  bool InterleavingRequiresRuntimePointerCheck =
      (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);

  // We want to interleave small loops in order to reduce the loop overhead
  // and potentially expose ILP opportunities.
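  // Worked example (assumed numbers; SmallLoopCost is a cl::opt defined
  // earlier in this file): with SmallLoopCost = 20 and a per-iteration
  // LoopCost = 4, the small-loop path below computes
  // SmallIC = min(IC, PowerOf2Floor(20 / 4)) = min(IC, 4), i.e. it
  // interleaves until the loop-overhead share drops to roughly 5%.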
  LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
                    << "LV: IC is " << IC << '\n'
                    << "LV: VF is " << VF << '\n');
  const bool AggressivelyInterleaveReductions =
      TTI.enableAggressiveInterleaving(HasReductions);
  if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
    // We assume that the cost overhead is 1 and we use the cost model
    // to estimate the cost of the loop and interleave until the cost of the
    // loop overhead is about 5% of the cost of the loop.
    unsigned SmallIC =
        std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));

    // Interleave until store/load ports (estimated by max interleave count)
    // are saturated.
    unsigned NumStores = Legal->getNumStores();
    unsigned NumLoads = Legal->getNumLoads();
    unsigned StoresIC = IC / (NumStores ? NumStores : 1);
    unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);

    // There is little point in interleaving for reductions containing selects
    // and compares when VF=1 since it may just create more overhead than it's
    // worth for loops with small trip counts. This is because we still have
    // to do the final reduction after the loop.
    bool HasSelectCmpReductions =
        HasReductions &&
        any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
          const RecurrenceDescriptor &RdxDesc = Reduction.second;
          return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
              RdxDesc.getRecurrenceKind());
        });
    if (HasSelectCmpReductions) {
      LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
      return 1;
    }

    // If we have a scalar reduction (vector reductions are already dealt with
    // by this point), we can increase the critical path length if the loop
    // we're interleaving is inside another loop. For tree-wise reductions
    // set the limit to 2, and for ordered reductions it's best to disable
    // interleaving entirely.
    if (HasReductions && TheLoop->getLoopDepth() > 1) {
      bool HasOrderedReductions =
          any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
            const RecurrenceDescriptor &RdxDesc = Reduction.second;
            return RdxDesc.isOrdered();
          });
      if (HasOrderedReductions) {
        LLVM_DEBUG(
            dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
        return 1;
      }

      unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
      SmallIC = std::min(SmallIC, F);
      StoresIC = std::min(StoresIC, F);
      LoadsIC = std::min(LoadsIC, F);
    }

    if (EnableLoadStoreRuntimeInterleave &&
        std::max(StoresIC, LoadsIC) > SmallIC) {
      LLVM_DEBUG(
          dbgs() << "LV: Interleaving to saturate store or load ports.\n");
      return std::max(StoresIC, LoadsIC);
    }

    // If there are scalar reductions and TTI has enabled aggressive
    // interleaving for reductions, we will interleave to expose ILP.
    if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
        AggressivelyInterleaveReductions) {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
      // Interleave no less than SmallIC but not as aggressive as the normal
      // IC to satisfy the rare situation when resources are too limited.
      return std::max(IC / 2, SmallIC);
    } else {
      LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
      return SmallIC;
    }
  }

  // Interleave if this is a large loop (small loops are already dealt with by
  // this point) that could benefit from interleaving.
  if (AggressivelyInterleaveReductions) {
    LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
    return IC;
  }

  LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
  return 1;
}

SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
  // This function calculates the register usage by measuring the highest
  // number of values that are alive at a single location. Obviously, this is
  // a very rough estimation. We scan the loop in topological order and assign
  // a number to each instruction. We use RPO to ensure that defs are met
  // before their users. We assume that each instruction that has in-loop
  // users starts an interval. We record every time that an in-loop value is
  // used, so we have a list of the first and last occurrences of each
  // instruction. Next, we transpose this data structure into a multi map that
  // holds the list of intervals that *end* at a specific location. This multi
  // map allows us to perform a linear search. We scan the instructions
  // linearly and record each time that a new interval starts, by placing it
  // in a set. If we find this value in the multi-map then we remove it from
  // the set. The max register usage is the maximum size of the set.
  // We also search for instructions that are defined outside the loop, but
  // are used inside the loop. We need this number separately from the
  // max-interval usage number because when we unroll, loop-invariant values
  // do not take more registers.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);

  RegisterUsage RU;

  // Each 'key' in the map opens a new interval. The values
  // of the map are the index of the 'last seen' usage of the
  // instruction that is the key.
  using IntervalMap = DenseMap<Instruction *, unsigned>;

  // Maps instruction to its index.
  SmallVector<Instruction *, 64> IdxToInstr;
  // Marks the end of each interval.
  IntervalMap EndPoint;
  // Saves the list of instruction indices that are used in the loop.
  SmallPtrSet<Instruction *, 8> Ends;
  // Saves the list of values that are used in the loop but are
  // defined outside the loop, such as arguments and constants.
  SmallPtrSet<Value *, 8> LoopInvariants;

  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      IdxToInstr.push_back(&I);

      // Save the end location of each USE.
      for (Value *U : I.operands()) {
        auto *Instr = dyn_cast<Instruction>(U);

        // Ignore non-instruction values such as arguments, constants, etc.
        if (!Instr)
          continue;

        // If this instruction is outside the loop then record it and
        // continue.
        if (!TheLoop->contains(Instr)) {
          LoopInvariants.insert(Instr);
          continue;
        }

        // Overwrite previous end points.
        EndPoint[Instr] = IdxToInstr.size();
        Ends.insert(Instr);
      }
    }
  }

  // Saves the list of intervals that end with the index in 'key'.
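  // Worked example (hypothetical indices): if %a is defined at index 1 and
  // last used at index 4, then EndPoint[%a] = 4; transposing gives
  // TransposeEnds[4] = {%a, ...}, so when the linear scan below reaches
  // index 4 it closes %a's interval and shrinks the open-interval set.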
  using InstrList = SmallVector<Instruction *, 2>;
  DenseMap<unsigned, InstrList> TransposeEnds;

  // Transpose the EndPoints to a list of values that end at each index.
  for (auto &Interval : EndPoint)
    TransposeEnds[Interval.second].push_back(Interval.first);

  SmallPtrSet<Instruction *, 8> OpenIntervals;
  SmallVector<RegisterUsage, 8> RUs(VFs.size());
  SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());

  LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");

  // A lambda that gets the register usage for the given type and VF.
  const auto &TTICapture = TTI;
  auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
    if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
      return 0;
    InstructionCost::CostType RegUsage =
        *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
    assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
           "Nonsensical values for register usage.");
    return RegUsage;
  };

  for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
    Instruction *I = IdxToInstr[i];

    // Remove all of the instructions that end at this location.
    InstrList &List = TransposeEnds[i];
    for (Instruction *ToRemove : List)
      OpenIntervals.erase(ToRemove);

    // Ignore instructions that are never used within the loop.
    if (!Ends.count(I))
      continue;

    // Skip ignored values.
    if (ValuesToIgnore.count(I))
      continue;

    // For each VF find the maximum usage of registers.
    for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
      // Count the number of live intervals.
      SmallMapVector<unsigned, unsigned, 4> RegUsage;

      if (VFs[j].isScalar()) {
        for (auto Inst : OpenIntervals) {
          unsigned ClassID =
              TTI.getRegisterClassForType(false, Inst->getType());
          if (RegUsage.find(ClassID) == RegUsage.end())
            RegUsage[ClassID] = 1;
          else
            RegUsage[ClassID] += 1;
        }
      } else {
        collectUniformsAndScalars(VFs[j]);
        for (auto Inst : OpenIntervals) {
          // Skip ignored values for VF > 1.
          if (VecValuesToIgnore.count(Inst))
            continue;
          if (isScalarAfterVectorization(Inst, VFs[j])) {
            unsigned ClassID =
                TTI.getRegisterClassForType(false, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = 1;
            else
              RegUsage[ClassID] += 1;
          } else {
            unsigned ClassID =
                TTI.getRegisterClassForType(true, Inst->getType());
            if (RegUsage.find(ClassID) == RegUsage.end())
              RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
            else
              RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
          }
        }
      }

      for (auto &pair : RegUsage) {
        if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
          MaxUsages[j][pair.first] =
              std::max(MaxUsages[j][pair.first], pair.second);
        else
          MaxUsages[j][pair.first] = pair.second;
      }
    }

    LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
                      << OpenIntervals.size() << '\n');

    // Add the current instruction to the list of open intervals.
    OpenIntervals.insert(I);
  }

  for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
    SmallMapVector<unsigned, unsigned, 4> Invariant;
    for (auto Inst : LoopInvariants) {
      unsigned Usage =
          VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
      unsigned ClassID =
          TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
      if (Invariant.find(ClassID) == Invariant.end())
        Invariant[ClassID] = Usage;
      else
        Invariant[ClassID] += Usage;
    }

    LLVM_DEBUG({
      dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
      dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
             << " item\n";
      for (const auto &pair : MaxUsages[i]) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
      dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
             << " item\n";
      for (const auto &pair : Invariant) {
        dbgs() << "LV(REG): RegisterClass: "
               << TTI.getRegisterClassName(pair.first) << ", " << pair.second
               << " registers\n";
      }
    });

    RU.LoopInvariantRegs = Invariant;
    RU.MaxLocalUsers = MaxUsages[i];
    RUs[i] = RU;
  }

  return RUs;
}

bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I) {
  // TODO: Cost model for emulated masked load/store is completely
  // broken. This hack guides the cost model to use an artificially
  // high enough value to practically disable vectorization with such
  // operations, except where previously deployed legality hack allowed
  // using very low cost values. This is to avoid regressions coming simply
  // from moving the "masked load/store" check from legality to the cost
  // model. Masked load/gather emulation was previously never allowed;
  // only a limited number of emulated masked stores/scatters was allowed.
  assert(isPredicatedInst(I) && "Expecting a scalar emulated instruction");
  return isa<LoadInst>(I) ||
         (isa<StoreInst>(I) &&
          NumPredStores > NumberOfStoresToPredicate);
}

void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
  // If we aren't vectorizing the loop, or if we've already collected the
  // instructions to scalarize, there's nothing to do. Collection may already
  // have occurred if we have a user-selected VF and are now computing the
  // expected cost for interleaving.
  if (VF.isScalar() || VF.isZero() ||
      InstsToScalarize.find(VF) != InstsToScalarize.end())
    return;

  // Initialize a mapping for VF in InstsToScalarize. If we find that it's
  // not profitable to scalarize any instructions, the presence of VF in the
  // map will indicate that we've analyzed it already.
  ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];

  // Find all the instructions that are scalar with predication in the loop
  // and determine if it would be better to not if-convert the blocks they are
  // in. If so, we also record the instructions to scalarize.
  for (BasicBlock *BB : TheLoop->blocks()) {
    if (!blockNeedsPredicationForAnyReason(BB))
      continue;
    for (Instruction &I : *BB)
      if (isScalarWithPredication(&I)) {
        ScalarCostsTy ScalarCosts;
        // Do not apply discount if scalable, because that would lead to
        // invalid scalarization costs.
        // Do not apply discount logic if hacked cost is needed
        // for emulated masked memrefs.
        if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
            computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
          ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
        // Remember that BB will remain after vectorization.
6865 PredicatedBBsAfterVectorization.insert(BB); 6866 } 6867 } 6868 } 6869 6870 int LoopVectorizationCostModel::computePredInstDiscount( 6871 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6872 assert(!isUniformAfterVectorization(PredInst, VF) && 6873 "Instruction marked uniform-after-vectorization will be predicated"); 6874 6875 // Initialize the discount to zero, meaning that the scalar version and the 6876 // vector version cost the same. 6877 InstructionCost Discount = 0; 6878 6879 // Holds instructions to analyze. The instructions we visit are mapped in 6880 // ScalarCosts. Those instructions are the ones that would be scalarized if 6881 // we find that the scalar version costs less. 6882 SmallVector<Instruction *, 8> Worklist; 6883 6884 // Returns true if the given instruction can be scalarized. 6885 auto canBeScalarized = [&](Instruction *I) -> bool { 6886 // We only attempt to scalarize instructions forming a single-use chain 6887 // from the original predicated block that would otherwise be vectorized. 6888 // Although not strictly necessary, we give up on instructions we know will 6889 // already be scalar to avoid traversing chains that are unlikely to be 6890 // beneficial. 6891 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6892 isScalarAfterVectorization(I, VF)) 6893 return false; 6894 6895 // If the instruction is scalar with predication, it will be analyzed 6896 // separately. We ignore it within the context of PredInst. 6897 if (isScalarWithPredication(I)) 6898 return false; 6899 6900 // If any of the instruction's operands are uniform after vectorization, 6901 // the instruction cannot be scalarized. This prevents, for example, a 6902 // masked load from being scalarized. 6903 // 6904 // We assume we will only emit a value for lane zero of an instruction 6905 // marked uniform after vectorization, rather than VF identical values. 6906 // Thus, if we scalarize an instruction that uses a uniform, we would 6907 // create uses of values corresponding to the lanes we aren't emitting code 6908 // for. This behavior can be changed by allowing getScalarValue to clone 6909 // the lane zero values for uniforms rather than asserting. 6910 for (Use &U : I->operands()) 6911 if (auto *J = dyn_cast<Instruction>(U.get())) 6912 if (isUniformAfterVectorization(J, VF)) 6913 return false; 6914 6915 // Otherwise, we can scalarize the instruction. 6916 return true; 6917 }; 6918 6919 // Compute the expected cost discount from scalarizing the entire expression 6920 // feeding the predicated instruction. We currently only consider expressions 6921 // that are single-use instruction chains. 6922 Worklist.push_back(PredInst); 6923 while (!Worklist.empty()) { 6924 Instruction *I = Worklist.pop_back_val(); 6925 6926 // If we've already analyzed the instruction, there's nothing to do. 6927 if (ScalarCosts.find(I) != ScalarCosts.end()) 6928 continue; 6929 6930 // Compute the cost of the vector instruction. Note that this cost already 6931 // includes the scalarization overhead of the predicated instruction. 6932 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6933 6934 // Compute the cost of the scalarized instruction. This cost is the cost of 6935 // the instruction as if it wasn't if-converted and instead remained in the 6936 // predicated block. We will scale this cost by block probability after 6937 // computing the scalarization overhead. 
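    // As an illustration (made-up numbers, not target data): with VF = 4 and
    // the reciprocal block probability of 2 returned by
    // getReciprocalPredBlockProb(), a per-lane scalar cost C first becomes
    // 4 * C plus any insert/extract/phi overhead, and is then halved below to
    // reflect that the predicated block only executes on roughly half of the
    // iterations.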
    InstructionCost ScalarCost =
        VF.getFixedValue() *
        getInstructionCost(I, ElementCount::getFixed(1)).first;

    // Compute the scalarization overhead of needed insertelement instructions
    // and phi nodes.
    if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
      ScalarCost += TTI.getScalarizationOverhead(
          cast<VectorType>(ToVectorTy(I->getType(), VF)),
          APInt::getAllOnes(VF.getFixedValue()), true, false);
      ScalarCost +=
          VF.getFixedValue() *
          TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
    }

    // Compute the scalarization overhead of needed extractelement
    // instructions. For each of the instruction's operands, if the operand can
    // be scalarized, add it to the worklist; otherwise, account for the
    // overhead.
    for (Use &U : I->operands())
      if (auto *J = dyn_cast<Instruction>(U.get())) {
        assert(VectorType::isValidElementType(J->getType()) &&
               "Instruction has non-scalar type");
        if (canBeScalarized(J))
          Worklist.push_back(J);
        else if (needsExtract(J, VF)) {
          ScalarCost += TTI.getScalarizationOverhead(
              cast<VectorType>(ToVectorTy(J->getType(), VF)),
              APInt::getAllOnes(VF.getFixedValue()), false, true);
        }
      }

    // Scale the total scalar cost by block probability.
    ScalarCost /= getReciprocalPredBlockProb();

    // Compute the discount. A non-negative discount means the vector version
    // of the instruction costs more, and scalarizing would be beneficial.
    Discount += VectorCost - ScalarCost;
    ScalarCosts[I] = ScalarCost;
  }

  return *Discount.getValue();
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::expectedCost(
    ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
  VectorizationCostTy Cost;

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    VectorizationCostTy BlockCost;

    // For each instruction in the old loop.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      // Skip ignored values.
      if (ValuesToIgnore.count(&I) ||
          (VF.isVector() && VecValuesToIgnore.count(&I)))
        continue;

      VectorizationCostTy C = getInstructionCost(&I, VF);

      // Check if we should override the cost.
      if (C.first.isValid() &&
          ForceTargetInstructionCost.getNumOccurrences() > 0)
        C.first = InstructionCost(ForceTargetInstructionCost);

      // Keep a list of instructions with invalid costs.
      if (Invalid && !C.first.isValid())
        Invalid->emplace_back(&I, VF);

      BlockCost.first += C.first;
      BlockCost.second |= C.second;
      LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
                        << " for VF " << VF << " For instruction: " << I
                        << '\n');
    }

    // If we are vectorizing a predicated block, it will have been
    // if-converted. This means that the block's instructions (aside from
    // stores and instructions that may divide by zero) will now be
    // unconditionally executed. For the scalar case, we may not always execute
    // the predicated block, if it is an if-else block. Thus, scale the block's
    // cost by the probability of executing it. blockNeedsPredication from
    // Legal is used so as to not include all blocks in tail-folded loops.
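    // As an illustration (made-up numbers): a predicated block whose
    // instructions sum to a scalar cost of 10 contributes only 5 here, since
    // getReciprocalPredBlockProb() models the block as executing on about half
    // of the iterations.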
    if (VF.isScalar() && Legal->blockNeedsPredication(BB))
      BlockCost.first /= getReciprocalPredBlockProb();

    Cost.first += BlockCost.first;
    Cost.second |= BlockCost.second;
  }

  return Cost;
}

/// Gets Address Access SCEV after verifying that the access pattern
/// is loop invariant except the induction variable dependence.
///
/// This SCEV can be sent to the Target in order to estimate the address
/// calculation cost.
static const SCEV *getAddressAccessSCEV(
    Value *Ptr, LoopVectorizationLegality *Legal,
    PredicatedScalarEvolution &PSE, const Loop *TheLoop) {

  auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
  if (!Gep)
    return nullptr;

  // We are looking for a GEP with all loop invariant indices except for one
  // which should be an induction variable.
  auto SE = PSE.getSE();
  unsigned NumOperands = Gep->getNumOperands();
  for (unsigned i = 1; i < NumOperands; ++i) {
    Value *Opd = Gep->getOperand(i);
    if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
        !Legal->isInductionVariable(Opd))
      return nullptr;
  }

  // Now we know we have a GEP of the form ptr, %inv, %ind, %inv. Return the
  // Ptr SCEV.
  return PSE.getSCEV(Ptr);
}

static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
  return Legal->hasStride(I->getOperand(0)) ||
         Legal->hasStride(I->getOperand(1));
}

InstructionCost
LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
                                                        ElementCount VF) {
  assert(VF.isVector() &&
         "Scalarization cost of instruction implies vectorization.");
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto SE = PSE.getSE();

  unsigned AS = getLoadStoreAddressSpace(I);
  Value *Ptr = getLoadStorePointerOperand(I);
  Type *PtrTy = ToVectorTy(Ptr->getType(), VF);

  // Figure out whether the access is strided and get the stride value
  // if it's known at compile time.
  const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);

  // Get the cost of the scalar memory instruction and address computation.
  InstructionCost Cost =
      VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);

  // Don't pass *I here, since it is scalar but will actually be part of a
  // vectorized loop where the user of it is a vectorized instruction.
  const Align Alignment = getLoadStoreAlignment(I);
  Cost += VF.getKnownMinValue() *
          TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(),
                              Alignment, AS, TTI::TCK_RecipThroughput);

  // Get the overhead of the extractelement and insertelement instructions
  // we might create due to scalarization.
  Cost += getScalarizationOverhead(I, VF);

  // If we have a predicated load/store, it will need extra i1 extracts and
  // conditional branches, but may not be executed for each vector lane. Scale
  // the cost by the probability of executing the predicated block.
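  // In summary, the predicated case below first scales the accumulated
  // per-lane costs down by the block probability, then adds the cost of
  // extracting each lane's i1 mask bit and of the branch guarding the
  // predicated block, and finally clamps emulated masked accesses to an
  // artificially high cost via useEmulatedMaskMemRefHack().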
  if (isPredicatedInst(I)) {
    Cost /= getReciprocalPredBlockProb();

    // Add the cost of an i1 extract and a branch.
    auto *Vec_i1Ty =
        VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
    Cost += TTI.getScalarizationOverhead(
        Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
        /*Insert=*/false, /*Extract=*/true);
    Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);

    if (useEmulatedMaskMemRefHack(I))
      // Artificially setting to a high enough value to practically disable
      // vectorization with such operations.
      Cost = 3000000;
  }

  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
                                                    ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  Value *Ptr = getLoadStorePointerOperand(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
         "Stride should be 1 or -1 for consecutive memory access");
  const Align Alignment = getLoadStoreAlignment(I);
  InstructionCost Cost = 0;
  if (Legal->isMaskRequired(I))
    Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                      CostKind);
  else
    Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
                                CostKind, I);

  bool Reverse = ConsecutiveStride < 0;
  if (Reverse)
    Cost +=
        TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
  return Cost;
}

InstructionCost
LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
                                                ElementCount VF) {
  assert(Legal->isUniformMemOp(*I));

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  unsigned AS = getLoadStoreAddressSpace(I);
  enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  if (isa<LoadInst>(I)) {
    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
                               CostKind) +
           TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
  }
  StoreInst *SI = cast<StoreInst>(I);

  bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
  return TTI.getAddressComputationCost(ValTy) +
         TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
                             CostKind) +
         (isLoopInvariantStoreValue
              ? 0
              : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
                                       VF.getKnownMinValue() - 1));
}

InstructionCost
LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
                                                 ElementCount VF) {
  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  const Align Alignment = getLoadStoreAlignment(I);
  const Value *Ptr = getLoadStorePointerOperand(I);

  return TTI.getAddressComputationCost(VectorTy) +
         TTI.getGatherScatterOpCost(
             I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
             TargetTransformInfo::TCK_RecipThroughput, I);
}

InstructionCost
LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
                                                   ElementCount VF) {
  // TODO: Once we have support for interleaving with scalable vectors
  // we can calculate the cost properly here.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  Type *ValTy = getLoadStoreType(I);
  auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
  unsigned AS = getLoadStoreAddressSpace(I);

  auto Group = getInterleavedAccessGroup(I);
  assert(Group && "Fail to get an interleaved access group.");

  unsigned InterleaveFactor = Group->getFactor();
  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);

  // Holds the indices of existing members in the interleaved group.
  SmallVector<unsigned, 4> Indices;
  for (unsigned IF = 0; IF < InterleaveFactor; IF++)
    if (Group->getMember(IF))
      Indices.push_back(IF);

  // Calculate the cost of the whole interleaved group.
  bool UseMaskForGaps =
      (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
  InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
      I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
      AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);

  if (Group->isReverse()) {
    // TODO: Add support for reversed masked interleaved access.
    assert(!Legal->isMaskRequired(I) &&
           "Reverse masked interleaved access not supported.");
    Cost += Group->getNumMembers() *
            TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None,
                               0);
  }
  return Cost;
}

Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
    Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
  using namespace llvm::PatternMatch;
  // Early exit for no in-loop reductions.
  if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
    return None;
  auto *VectorTy = cast<VectorType>(Ty);

  // We are looking for a pattern of, and finding the minimal acceptable cost:
  //   reduce(mul(ext(A), ext(B))) or
  //   reduce(mul(A, B)) or
  //   reduce(ext(A)) or
  //   reduce(A).
  // The basic idea is that we walk down the tree, finding the root reduction
  // instruction in InLoopReductionImmediateChains. From there we find the
  // pattern of mul/ext and test the cost of the entire pattern vs the cost of
  // the components. If the reduction cost is lower, we return it for the
  // reduction instruction and 0 for the other instructions in the pattern. If
  // it is not, we return None, indicating that the original cost method
  // should be used.
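  // As an illustration only, after vectorization a reduce(mul(ext(A), ext(B)))
  // chain may look like:
  //   %a.ext = sext <16 x i8> %a to <16 x i32>
  //   %b.ext = sext <16 x i8> %b to <16 x i32>
  //   %mul = mul <16 x i32> %a.ext, %b.ext
  //   %rdx = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %mul)
  // On targets with a multiply-accumulate reduction, the whole chain can be
  // cheaper than the sum of the costs of its parts.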
  Instruction *RetI = I;
  if (match(RetI, m_ZExtOrSExt(m_Value()))) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }
  if (match(RetI, m_Mul(m_Value(), m_Value())) &&
      RetI->user_back()->getOpcode() == Instruction::Add) {
    if (!RetI->hasOneUser())
      return None;
    RetI = RetI->user_back();
  }

  // Test if the found instruction is a reduction. If not, return None so that
  // the parent uses the original cost modelling.
  if (!InLoopReductionImmediateChains.count(RetI))
    return None;

  // Find the reduction this chain is a part of and calculate the basic cost of
  // the reduction on its own.
  Instruction *LastChain = InLoopReductionImmediateChains[RetI];
  Instruction *ReductionPhi = LastChain;
  while (!isa<PHINode>(ReductionPhi))
    ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];

  const RecurrenceDescriptor &RdxDesc =
      Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];

  InstructionCost BaseCost = TTI.getArithmeticReductionCost(
      RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);

  // If we're using ordered reductions then we can just return the base cost
  // here, since getArithmeticReductionCost calculates the full ordered
  // reduction cost when FP reassociation is not allowed.
  if (useOrderedReductions(RdxDesc))
    return BaseCost;

  // Get the operand that was not the reduction chain and match it to one of
  // the patterns, returning the better cost if it is found.
  Instruction *RedOp = RetI->getOperand(1) == LastChain
                           ? dyn_cast<Instruction>(RetI->getOperand(0))
                           : dyn_cast<Instruction>(RetI->getOperand(1));

  VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);

  Instruction *Op0, *Op1;
  if (RedOp &&
      match(RedOp,
            m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
      match(Op0, m_ZExtOrSExt(m_Value())) &&
      Op0->getOpcode() == Op1->getOpcode() &&
      Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
      !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
      (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {

    // Matched reduce(ext(mul(ext(A), ext(B)))).
    // Note that the extend opcodes need to all match, or if A==B they will
    // have been converted to zext(mul(sext(A), sext(A))) as it is known
    // positive, which is equally fine.
    bool IsUnsigned = isa<ZExtInst>(Op0);
    auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
    auto *MulType = VectorType::get(Op0->getType(), VectorTy);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
                             TTI::CastContextHint::None, CostKind, Op0);
    InstructionCost MulCost =
        TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
    InstructionCost Ext2Cost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
                             TTI::CastContextHint::None, CostKind, RedOp);

    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    if (RedCost.isValid() &&
        RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
             !TheLoop->isLoopInvariant(RedOp)) {
    // Matched reduce(ext(A)).
    bool IsUnsigned = isa<ZExtInst>(RedOp);
    auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
    InstructionCost RedCost = TTI.getExtendedAddReductionCost(
        /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
        CostKind);

    InstructionCost ExtCost =
        TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
                             TTI::CastContextHint::None, CostKind, RedOp);
    if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
      return I == RetI ? RedCost : 0;
  } else if (RedOp &&
             match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
    if (match(Op0, m_ZExtOrSExt(m_Value())) &&
        Op0->getOpcode() == Op1->getOpcode() &&
        Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
        !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
      bool IsUnsigned = isa<ZExtInst>(Op0);
      auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
      // Matched reduce(mul(ext(A), ext(B))).
      InstructionCost ExtCost =
          TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
                               TTI::CastContextHint::None, CostKind, Op0);
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
          CostKind);

      if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
        return I == RetI ? RedCost : 0;
    } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
      // Matched reduce(mul(A, B)).
      InstructionCost MulCost =
          TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);

      InstructionCost RedCost = TTI.getExtendedAddReductionCost(
          /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
          CostKind);

      if (RedCost.isValid() && RedCost < MulCost + BaseCost)
        return I == RetI ? RedCost : 0;
    }
  }

  return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
}

InstructionCost
LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
                                                     ElementCount VF) {
  // Calculate scalar cost only. Vectorization cost should be ready at this
  // moment.
  if (VF.isScalar()) {
    Type *ValTy = getLoadStoreType(I);
    const Align Alignment = getLoadStoreAlignment(I);
    unsigned AS = getLoadStoreAddressSpace(I);

    return TTI.getAddressComputationCost(ValTy) +
           TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
                               TTI::TCK_RecipThroughput, I);
  }
  return getWideningCost(I, VF);
}

LoopVectorizationCostModel::VectorizationCostTy
LoopVectorizationCostModel::getInstructionCost(Instruction *I,
                                               ElementCount VF) {
  // If we know that this instruction will remain uniform, check the cost of
  // the scalar version.
  if (isUniformAfterVectorization(I, VF))
    VF = ElementCount::getFixed(1);

  if (VF.isVector() && isProfitableToScalarize(I, VF))
    return VectorizationCostTy(InstsToScalarize[VF][I], false);

  // Forced scalars do not have any scalarization overhead.
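  // A forced scalar is costed as VF copies of the scalar instruction, i.e.
  // (cost at VF = 1) * VF.getKnownMinValue(), with no extra insert/extract
  // overhead added.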
  auto ForcedScalar = ForcedScalars.find(VF);
  if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
    auto InstSet = ForcedScalar->second;
    if (InstSet.count(I))
      return VectorizationCostTy(
          (getInstructionCost(I, ElementCount::getFixed(1)).first *
           VF.getKnownMinValue()),
          false);
  }

  Type *VectorTy;
  InstructionCost C = getInstructionCost(I, VF, VectorTy);

  bool TypeNotScalarized = false;
  if (VF.isVector() && VectorTy->isVectorTy()) {
    unsigned NumParts = TTI.getNumberOfParts(VectorTy);
    if (NumParts)
      TypeNotScalarized = NumParts < VF.getKnownMinValue();
    else
      C = InstructionCost::getInvalid();
  }
  return VectorizationCostTy(C, TypeNotScalarized);
}

InstructionCost
LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
                                                     ElementCount VF) const {

  // There is no mechanism yet to create a scalable scalarization loop,
  // so this is currently Invalid.
  if (VF.isScalable())
    return InstructionCost::getInvalid();

  if (VF.isScalar())
    return 0;

  InstructionCost Cost = 0;
  Type *RetTy = ToVectorTy(I->getType(), VF);
  if (!RetTy->isVoidTy() &&
      (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
    Cost += TTI.getScalarizationOverhead(
        cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
        false);

  // Some targets keep addresses scalar.
  if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
    return Cost;

  // Some targets support efficient element stores.
  if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
    return Cost;

  // Collect operands to consider.
  CallInst *CI = dyn_cast<CallInst>(I);
  Instruction::op_range Ops = CI ? CI->args() : I->operands();

  // Skip operands that do not require extraction/scalarization and do not
  // incur any overhead.
  SmallVector<Type *> Tys;
  for (auto *V : filterExtractingOperands(Ops, VF))
    Tys.push_back(MaybeVectorizeType(V->getType(), VF));
  return Cost + TTI.getOperandsScalarizationOverhead(
                    filterExtractingOperands(Ops, VF), Tys);
}

void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
  if (VF.isScalar())
    return;
  NumPredStores = 0;
  for (BasicBlock *BB : TheLoop->blocks()) {
    // For each instruction in the old loop.
    for (Instruction &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;

      // TODO: We should generate better code and update the cost model for
      // predicated uniform stores. Today they are treated as any other
      // predicated store (see added test cases in
      // invariant-store-vectorization.ll).
      if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
        NumPredStores++;

      if (Legal->isUniformMemOp(I)) {
        // TODO: Avoid replicating loads and stores instead of
        // relying on instcombine to remove them.
        //   Load: scalar load + broadcast
        //   Store: scalar store + (isLoopInvariantStoreValue ? 0 : extract)
        InstructionCost Cost;
        if (isa<StoreInst>(&I) && VF.isScalable() &&
            isLegalGatherOrScatter(&I)) {
          Cost = getGatherScatterCost(&I, VF);
          setWideningDecision(&I, VF, CM_GatherScatter, Cost);
        } else {
          assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
                 "Cannot yet scalarize uniform stores");
          Cost = getUniformMemOpCost(&I, VF);
          setWideningDecision(&I, VF, CM_Scalarize, Cost);
        }
        continue;
      }

      // We assume that widening is the best solution when possible.
      if (memoryInstructionCanBeWidened(&I, VF)) {
        InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
        int ConsecutiveStride = Legal->isConsecutivePtr(
            getLoadStoreType(&I), getLoadStorePointerOperand(&I));
        assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
               "Expected consecutive stride.");
        InstWidening Decision =
            ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
        setWideningDecision(&I, VF, Decision, Cost);
        continue;
      }

      // Choose between Interleaving, Gather/Scatter or Scalarization.
      InstructionCost InterleaveCost = InstructionCost::getInvalid();
      unsigned NumAccesses = 1;
      if (isAccessInterleaved(&I)) {
        auto Group = getInterleavedAccessGroup(&I);
        assert(Group && "Fail to get an interleaved access group.");

        // Make one decision for the whole group.
        if (getWideningDecision(&I, VF) != CM_Unknown)
          continue;

        NumAccesses = Group->getNumMembers();
        if (interleavedAccessCanBeWidened(&I, VF))
          InterleaveCost = getInterleaveGroupCost(&I, VF);
      }

      InstructionCost GatherScatterCost =
          isLegalGatherOrScatter(&I)
              ? getGatherScatterCost(&I, VF) * NumAccesses
              : InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          getMemInstScalarizationCost(&I, VF) * NumAccesses;

      // Choose the better solution for the current VF, record this decision,
      // and use it during vectorization.
      InstructionCost Cost;
      InstWidening Decision;
      if (InterleaveCost <= GatherScatterCost &&
          InterleaveCost < ScalarizationCost) {
        Decision = CM_Interleave;
        Cost = InterleaveCost;
      } else if (GatherScatterCost < ScalarizationCost) {
        Decision = CM_GatherScatter;
        Cost = GatherScatterCost;
      } else {
        Decision = CM_Scalarize;
        Cost = ScalarizationCost;
      }
      // If the instruction belongs to an interleave group, the whole group
      // receives the same decision. The whole group receives the cost, but
      // the cost will actually be assigned to one instruction.
      if (auto Group = getInterleavedAccessGroup(&I))
        setWideningDecision(Group, VF, Decision, Cost);
      else
        setWideningDecision(&I, VF, Decision, Cost);
    }
  }

  // Make sure that any load of address and any other address computation
  // remains scalar unless there is gather/scatter support. This avoids
  // inevitable extracts into address registers, and also has the benefit of
  // activating LSR more, since that pass can't optimize vectorized
  // addresses.
  if (TTI.prefersVectorizedAddressing())
    return;

  // Start with all scalar pointer uses.
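  // As an illustration only: in
  //   %p = load i32*, i32** %q
  //   %v = load i32, i32* %p
  // the first load feeds an address computation, so below it is forced to
  // remain scalar instead of being widened and then extracted lane by lane.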
  SmallPtrSet<Instruction *, 8> AddrDefs;
  for (BasicBlock *BB : TheLoop->blocks())
    for (Instruction &I : *BB) {
      Instruction *PtrDef =
          dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
      if (PtrDef && TheLoop->contains(PtrDef) &&
          getWideningDecision(&I, VF) != CM_GatherScatter)
        AddrDefs.insert(PtrDef);
    }

  // Add all instructions used to generate the addresses.
  SmallVector<Instruction *, 4> Worklist;
  append_range(Worklist, AddrDefs);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (auto &Op : I->operands())
      if (auto *InstOp = dyn_cast<Instruction>(Op))
        if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
            AddrDefs.insert(InstOp).second)
          Worklist.push_back(InstOp);
  }

  for (auto *I : AddrDefs) {
    if (isa<LoadInst>(I)) {
      // Setting the desired widening decision should ideally be handled by
      // cost functions, but since this involves the task of finding out
      // if the loaded register is involved in an address computation, it is
      // instead changed here when we know this is the case.
      InstWidening Decision = getWideningDecision(I, VF);
      if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
        // Scalarize a widened load of address.
        setWideningDecision(
            I, VF, CM_Scalarize,
            (VF.getKnownMinValue() *
             getMemoryInstructionCost(I, ElementCount::getFixed(1))));
      else if (auto Group = getInterleavedAccessGroup(I)) {
        // Scalarize an interleave group of address loads.
        for (unsigned I = 0; I < Group->getFactor(); ++I) {
          if (Instruction *Member = Group->getMember(I))
            setWideningDecision(
                Member, VF, CM_Scalarize,
                (VF.getKnownMinValue() *
                 getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
        }
      }
    } else
      // Make sure I gets scalarized and a cost estimate without
      // scalarization overhead.
      ForcedScalars[VF].insert(I);
  }
}

InstructionCost
LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
                                               Type *&VectorTy) {
  Type *RetTy = I->getType();
  if (canTruncateToMinimalBitwidth(I, VF))
    RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
  auto SE = PSE.getSE();
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  auto hasSingleCopyAfterVectorization = [this](Instruction *I,
                                                ElementCount VF) -> bool {
    if (VF.isScalar())
      return true;

    auto Scalarized = InstsToScalarize.find(VF);
    assert(Scalarized != InstsToScalarize.end() &&
           "VF not yet analyzed for scalarization profitability");
    return !Scalarized->second.count(I) &&
           llvm::all_of(I->users(), [&](User *U) {
             auto *UI = cast<Instruction>(U);
             return !Scalarized->second.count(UI);
           });
  };
  (void)hasSingleCopyAfterVectorization;

  if (isScalarAfterVectorization(I, VF)) {
    // With the exception of GEPs and PHIs, after scalarization there should
    // only be one copy of the instruction generated in the loop. This is
    // because the VF is either 1, or any instructions that need scalarizing
    // have already been dealt with by the time we get here. As a result,
    // it means we don't have to multiply the instruction cost by VF.
    assert(I->getOpcode() == Instruction::GetElementPtr ||
           I->getOpcode() == Instruction::PHI ||
           (I->getOpcode() == Instruction::BitCast &&
            I->getType()->isPointerTy()) ||
           hasSingleCopyAfterVectorization(I, VF));
    VectorTy = RetTy;
  } else
    VectorTy = ToVectorTy(RetTy, VF);

  // TODO: We need to estimate the cost of intrinsic calls.
  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
    // We mark this instruction as zero-cost because the cost of GEPs in
    // vectorized code depends on whether the corresponding memory instruction
    // is scalarized or not. Therefore, we handle GEPs with the memory
    // instruction cost.
    return 0;
  case Instruction::Br: {
    // In cases of scalarized and predicated instructions, there will be VF
    // predicated blocks in the vectorized loop. Each branch around these
    // blocks also requires an extract of its vector compare i1 element.
    bool ScalarPredicatedBB = false;
    BranchInst *BI = cast<BranchInst>(I);
    if (VF.isVector() && BI->isConditional() &&
        (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
         PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
      ScalarPredicatedBB = true;

    if (ScalarPredicatedBB) {
      // Not possible to scalarize a scalable vector with predicated
      // instructions.
      if (VF.isScalable())
        return InstructionCost::getInvalid();
      // Return cost for branches around scalarized and predicated blocks.
      auto *Vec_i1Ty =
          VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
      return (
          TTI.getScalarizationOverhead(
              Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
          (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
    } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
      // The back-edge branch will remain, as will all scalar branches.
      return TTI.getCFInstrCost(Instruction::Br, CostKind);
    else
      // This branch will be eliminated by if-conversion.
      return 0;
    // Note: We currently assume zero cost for an unconditional branch inside
    // a predicated block since it will become a fall-through, although we
    // may decide in the future to call TTI for all branches.
  }
  case Instruction::PHI: {
    auto *Phi = cast<PHINode>(I);

    // First-order recurrences are replaced by vector shuffles inside the loop.
    // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
    if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
      return TTI.getShuffleCost(
          TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
          None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));

    // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
    // converted into select instructions. We require N - 1 selects per phi
    // node, where N is the number of incoming values.
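    // As an illustration only, a phi merging three incoming values:
    //   %phi = phi i32 [ %a, %bb0 ], [ %b, %bb1 ], [ %c, %bb2 ]
    // is if-converted into two selects on the block masks, so it is costed
    // below as 2 * (cost of one vector select).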
    if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
      return (Phi->getNumIncomingValues() - 1) *
             TTI.getCmpSelInstrCost(
                 Instruction::Select, ToVectorTy(Phi->getType(), VF),
                 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
                 CmpInst::BAD_ICMP_PREDICATE, CostKind);

    return TTI.getCFInstrCost(Instruction::PHI, CostKind);
  }
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // If we have a predicated instruction, it may not be executed for each
    // vector lane. Get the scalarization cost and scale this amount by the
    // probability of executing the predicated block. If the instruction is not
    // predicated, we fall through to the next case.
    if (VF.isVector() && isScalarWithPredication(I)) {
      InstructionCost Cost = 0;

      // These instructions have a non-void type, so account for the phi nodes
      // that we will create. This cost is likely to be zero. The phi node
      // cost, if any, should be scaled by the block probability because it
      // models a copy at the end of each predicated block.
      Cost += VF.getKnownMinValue() *
              TTI.getCFInstrCost(Instruction::PHI, CostKind);

      // The cost of the non-predicated instruction.
      Cost += VF.getKnownMinValue() *
              TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);

      // The cost of insertelement and extractelement instructions needed for
      // scalarization.
      Cost += getScalarizationOverhead(I, VF);

      // Scale the cost by the probability of executing the predicated blocks.
      // This assumes the predicated block for each vector lane is equally
      // likely.
      return Cost / getReciprocalPredBlockProb();
    }
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::FDiv:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: {
    // Since we will replace the stride by 1 the multiplication should go away.
    if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
      return 0;

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    // Certain instructions can be cheaper to vectorize if they have a constant
    // second vector operand. One example of this is shifts on x86.
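    // As an illustration only: in
    //   %r = shl i32 %x, %amt
    // where %amt is loop-invariant, the second operand's kind is promoted
    // below from OK_AnyValue to OK_UniformValue, which lets targets like x86
    // report a cheaper cost than for a fully variable per-lane shift amount.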
    Value *Op2 = I->getOperand(1);
    TargetTransformInfo::OperandValueProperties Op2VP;
    TargetTransformInfo::OperandValueKind Op2VK =
        TTI.getOperandInfo(Op2, Op2VP);
    if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
      Op2VK = TargetTransformInfo::OK_UniformValue;

    SmallVector<const Value *, 4> Operands(I->operand_values());
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
  }
  case Instruction::FNeg: {
    return TTI.getArithmeticInstrCost(
        I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
        TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
        TargetTransformInfo::OP_None, I->getOperand(0), I);
  }
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
    bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));

    const Value *Op0, *Op1;
    using namespace llvm::PatternMatch;
    if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
                        match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
      // select x, y, false --> x & y
      // select x, true, y --> x | y
      TTI::OperandValueProperties Op1VP = TTI::OP_None;
      TTI::OperandValueProperties Op2VP = TTI::OP_None;
      TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
      TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
      assert(Op0->getType()->getScalarSizeInBits() == 1 &&
             Op1->getType()->getScalarSizeInBits() == 1);

      SmallVector<const Value *, 2> Operands{Op0, Op1};
      return TTI.getArithmeticInstrCost(
          match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And,
          VectorTy, CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
    }

    Type *CondTy = SI->getCondition()->getType();
    if (!ScalarCond)
      CondTy = VectorType::get(CondTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
                                  CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
  }
  case Instruction::ICmp:
  case Instruction::FCmp: {
    Type *ValTy = I->getOperand(0)->getType();
    Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
    if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
      ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
    VectorTy = ToVectorTy(ValTy, VF);
    return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
                                  CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
  }
  case Instruction::Store:
  case Instruction::Load: {
    ElementCount Width = VF;
    if (Width.isVector()) {
      InstWidening Decision = getWideningDecision(I, Width);
      assert(Decision != CM_Unknown &&
             "CM decision should be taken at this point");
      if (Decision == CM_Scalarize)
        Width = ElementCount::getFixed(1);
    }
    VectorTy = ToVectorTy(getLoadStoreType(I), Width);
    return getMemoryInstructionCost(I, VF);
  }
  case Instruction::BitCast:
    if (I->getType()->isPointerTy())
      return 0;
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc: {
    // Computes the CastContextHint from a Load/Store instruction.
    auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
      assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
             "Expected a load or a store!");

      if (VF.isScalar() || !TheLoop->contains(I))
        return TTI::CastContextHint::Normal;

      switch (getWideningDecision(I, VF)) {
      case LoopVectorizationCostModel::CM_GatherScatter:
        return TTI::CastContextHint::GatherScatter;
      case LoopVectorizationCostModel::CM_Interleave:
        return TTI::CastContextHint::Interleave;
      case LoopVectorizationCostModel::CM_Scalarize:
      case LoopVectorizationCostModel::CM_Widen:
        return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
                                        : TTI::CastContextHint::Normal;
      case LoopVectorizationCostModel::CM_Widen_Reverse:
        return TTI::CastContextHint::Reversed;
      case LoopVectorizationCostModel::CM_Unknown:
        llvm_unreachable("Instr did not go through cost modelling?");
      }

      llvm_unreachable("Unhandled case!");
    };

    unsigned Opcode = I->getOpcode();
    TTI::CastContextHint CCH = TTI::CastContextHint::None;
    // For Trunc, the context is the only user, which must be a StoreInst.
    if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
      if (I->hasOneUse())
        if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
          CCH = ComputeCCH(Store);
    }
    // For Z/Sext, the context is the operand, which must be a LoadInst.
    else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
             Opcode == Instruction::FPExt) {
      if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
        CCH = ComputeCCH(Load);
    }

    // We optimize the truncation of induction variables having constant
    // integer steps. The cost of these truncations is the same as the scalar
    // operation.
    if (isOptimizableIVTruncate(I, VF)) {
      auto *Trunc = cast<TruncInst>(I);
      return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
                                  Trunc->getSrcTy(), CCH, CostKind, Trunc);
    }

    // Detect reduction patterns.
    if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
      return *RedCost;

    Type *SrcScalarTy = I->getOperand(0)->getType();
    Type *SrcVecTy =
        VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
    if (canTruncateToMinimalBitwidth(I, VF)) {
      // This cast is going to be shrunk. This may remove the cast or it might
      // turn it into a slightly different cast. For example, if MinBW == 16,
      // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
      //
      // Calculate the modified src and dest types.
      Type *MinVecTy = VectorTy;
      if (Opcode == Instruction::Trunc) {
        SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
        SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
        VectorTy =
            smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
      }
    }

    return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
  }
  case Instruction::Call: {
    bool NeedToScalarize;
    CallInst *CI = cast<CallInst>(I);
    InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
    if (getVectorIntrinsicIDForCall(CI, TLI)) {
      InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
      return std::min(CallCost, IntrinsicCost);
    }
    return CallCost;
  }
  case Instruction::ExtractValue:
    return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
  case Instruction::Alloca:
    // We cannot easily widen an alloca to a scalable alloca, as
    // the result would need to be a vector of pointers.
    if (VF.isScalable())
      return InstructionCost::getInvalid();
    LLVM_FALLTHROUGH;
  default:
    // This opcode is unknown. Assume that it is the same as 'mul'.
    return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
  } // end of switch.
}

char LoopVectorize::ID = 0;

static const char lv_name[] = "Loop Vectorization";

INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)

namespace llvm {

Pass *createLoopVectorizePass() { return new LoopVectorize(); }

Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
                              bool VectorizeOnlyWhenForced) {
  return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
}

} // end namespace llvm

bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
  // Check if the pointer operand of a load or store instruction is
  // consecutive.
  if (auto *Ptr = getLoadStorePointerOperand(Inst))
    return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
  return false;
}

void LoopVectorizationCostModel::collectValuesToIgnore() {
  // Ignore ephemeral values.
  CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);

  // Ignore type-promoting instructions we identified during reduction
  // detection.
  for (auto &Reduction : Legal->getReductionVars()) {
    RecurrenceDescriptor &RedDes = Reduction.second;
    const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
  // Ignore type-casting instructions we identified during induction
  // detection.
  for (auto &Induction : Legal->getInductionVars()) {
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    VecValuesToIgnore.insert(Casts.begin(), Casts.end());
  }
}

void LoopVectorizationCostModel::collectInLoopReductions() {
  for (auto &Reduction : Legal->getReductionVars()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Reduction.second;

    // We don't collect reductions that are type promoted (yet).
    if (RdxDesc.getRecurrenceType() != Phi->getType())
      continue;

    // If the target would prefer this reduction to happen "in-loop", then we
    // want to record it as such.
    unsigned Opcode = RdxDesc.getOpcode();
    if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
        !TTI.preferInLoopReduction(Opcode, Phi->getType(),
                                   TargetTransformInfo::ReductionFlags()))
      continue;

    // Check that we can correctly put the reductions into the loop, by
    // finding the chain of operations that leads from the phi to the loop
    // exit value.
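    // As an illustration only, for a simple integer add reduction
    //   %sum = phi i32 [ 0, %preheader ], [ %sum.next, %latch ]
    //   %sum.next = add i32 %sum, %val
    // the chain is just { %sum.next }, and the add can later be emitted as an
    // in-loop reduction rather than a wide vector add.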
    SmallVector<Instruction *, 4> ReductionOperations =
        RdxDesc.getReductionOpChain(Phi, TheLoop);
    bool InLoop = !ReductionOperations.empty();
    if (InLoop) {
      InLoopReductionChains[Phi] = ReductionOperations;
      // Add the elements to InLoopReductionImmediateChains for cost modelling.
      Instruction *LastChain = Phi;
      for (auto *I : ReductionOperations) {
        InLoopReductionImmediateChains[I] = LastChain;
        LastChain = I;
      }
    }
    LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
                      << " reduction for phi: " << *Phi << "\n");
  }
}

// TODO: we could return a pair of values that specify the max VF and
// min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
// `buildVPlans(VF, VF)`. We cannot do it because VPlan at the moment
// doesn't have a cost model that can choose which plan to execute if
// more than one is generated.
static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
                                 LoopVectorizationCostModel &CM) {
  unsigned WidestType;
  std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
  return WidestVectorRegBits / WidestType;
}

VectorizationFactor
LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
  assert(!UserVF.isScalable() && "scalable vectors not yet supported");
  ElementCount VF = UserVF;
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is
  // profitable. Since we cannot modify the incoming IR, we need to build
  // VPlan upfront in the vectorization pipeline.
  if (!OrigLoop->isInnermost()) {
    // If the user doesn't provide a vectorization factor, determine a
    // reasonable one.
    if (UserVF.isZero()) {
      VF = ElementCount::getFixed(determineVPlanVF(
          TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
              .getFixedSize(),
          CM));
      LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");

      // Make sure we have a VF > 1 for stress testing.
      if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
        LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
                          << "overriding computed VF.\n");
        VF = ElementCount::getFixed(4);
      }
    }
    assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
    assert(isPowerOf2_32(VF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
                      << "VF " << VF << " to build VPlans.\n");
    buildVPlans(VF, VF);

    // For VPlan build stress testing, we bail out after VPlan construction.
    if (VPlanBuildStressTest)
      return VectorizationFactor::Disabled();

    return {VF, 0 /*Cost*/};
  }

  LLVM_DEBUG(
      dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
                "VPlan-native path.\n");
  return VectorizationFactor::Disabled();
}

Optional<VectorizationFactor>
LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");
  FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
  if (!MaxFactors) // Cases that should not be vectorized or interleaved.
    return None;

  // Invalidate interleave groups if all blocks of the loop will be predicated.
  if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
      !useMaskedInterleavedAccesses(*TTI)) {
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate all interleaved groups due to fold-tail by masking "
           "which requires masked-interleaved support.\n");
    if (CM.InterleaveInfo.invalidateGroups())
      // Invalidating interleave groups also requires invalidating all
      // decisions based on them, which includes widening decisions and uniform
      // and scalar values.
      CM.invalidateCostModelingDecisions();
  }

  ElementCount MaxUserVF =
      UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
  bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
  if (!UserVF.isZero() && UserVFIsLegal) {
    assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
           "VF needs to be a power of two");
    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (CM.selectUserVectorizationFactor(UserVF)) {
      LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
      CM.collectInLoopReductions();
      buildVPlansWithVPRecipes(UserVF, UserVF);
      LLVM_DEBUG(printPlans(dbgs()));
      return {{UserVF, 0}};
    } else
      reportVectorizationInfo("UserVF ignored because of invalid costs.",
                              "InvalidCost", ORE, OrigLoop);
  }

  // Populate the set of Vectorization Factor Candidates.
  ElementCountSet VFCandidates;
  for (auto VF = ElementCount::getFixed(1);
       ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
    VFCandidates.insert(VF);
  for (auto VF = ElementCount::getScalable(1);
       ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
    VFCandidates.insert(VF);

  for (const auto &VF : VFCandidates) {
    // Collect Uniform and Scalar instructions after vectorization with VF.
    CM.collectUniformsAndScalars(VF);

    // Collect the instructions (and their associated costs) that will be more
    // profitable to scalarize.
    if (VF.isVector())
      CM.collectInstsToScalarize(VF);
  }

  CM.collectInLoopReductions();
  buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
  buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);

  LLVM_DEBUG(printPlans(dbgs()));
  if (!MaxFactors.hasVector())
    return VectorizationFactor::Disabled();

  // Select the optimal vectorization factor.
  auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);

  // Check if it is profitable to vectorize with runtime checks.
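  // As an illustration only (the thresholds are target- and flag-dependent):
  // if vectorizing would require several dozen pairwise pointer-overlap
  // checks, the runtime-check overhead can outweigh the expected speedup, so
  // the logic below disables vectorization unless reordering was explicitly
  // allowed.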
8213 unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks(); 8214 if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) { 8215 bool PragmaThresholdReached = 8216 NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold; 8217 bool ThresholdReached = 8218 NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold; 8219 if ((ThresholdReached && !Hints.allowReordering()) || 8220 PragmaThresholdReached) { 8221 ORE->emit([&]() { 8222 return OptimizationRemarkAnalysisAliasing( 8223 DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(), 8224 OrigLoop->getHeader()) 8225 << "loop not vectorized: cannot prove it is safe to reorder " 8226 "memory operations"; 8227 }); 8228 LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n"); 8229 Hints.emitRemarkWithHints(); 8230 return VectorizationFactor::Disabled(); 8231 } 8232 } 8233 return SelectedVF; 8234 } 8235 8236 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 8237 assert(count_if(VPlans, 8238 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 8239 1 && 8240 "Best VF has not a single VPlan."); 8241 8242 for (const VPlanPtr &Plan : VPlans) { 8243 if (Plan->hasVF(VF)) 8244 return *Plan.get(); 8245 } 8246 llvm_unreachable("No plan found!"); 8247 } 8248 8249 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 8250 VPlan &BestVPlan, 8251 InnerLoopVectorizer &ILV, 8252 DominatorTree *DT) { 8253 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 8254 << '\n'); 8255 8256 // Perform the actual loop transformation. 8257 8258 // 1. Create a new empty loop. Unlink the old loop and connect the new one. 8259 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 8260 State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton(); 8261 State.TripCount = ILV.getOrCreateTripCount(nullptr); 8262 State.CanonicalIV = ILV.Induction; 8263 8264 ILV.printDebugTracesAtStart(); 8265 8266 //===------------------------------------------------===// 8267 // 8268 // Notice: any optimization or new instruction that go 8269 // into the code below should also be implemented in 8270 // the cost-model. 8271 // 8272 //===------------------------------------------------===// 8273 8274 // 2. Copy and widen instructions from the old loop into the new loop. 8275 BestVPlan.execute(&State); 8276 8277 // 3. Fix the vectorized code: take care of header phi's, live-outs, 8278 // predication, updating analyses. 
  ILV.fixVectorizedLoop(State);

  ILV.printDebugTracesAtEnd();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
  for (const auto &Plan : VPlans)
    if (PrintVPlansInDotFormat)
      Plan->printDOT(O);
    else
      Plan->print(O);
}
#endif

void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
    SmallPtrSetImpl<Instruction *> &DeadInstructions) {

  // We create new control-flow for the vectorized loop, so an original exit
  // condition will be dead after vectorization if it is only used by the
  // terminator.
  SmallVector<BasicBlock *> ExitingBlocks;
  OrigLoop->getExitingBlocks(ExitingBlocks);
  for (auto *BB : ExitingBlocks) {
    auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
    if (!Cmp || !Cmp->hasOneUse())
      continue;

    // TODO: we should introduce a getUniqueExitingBlocks on Loop
    if (!DeadInstructions.insert(Cmp).second)
      continue;

    // An operand of the icmp is often a dead trunc, used by IndUpdate.
    // TODO: can recurse through operands in general
    for (Value *Op : Cmp->operands()) {
      if (isa<TruncInst>(Op) && Op->hasOneUse())
        DeadInstructions.insert(cast<Instruction>(Op));
    }
  }

  // We create new "steps" for induction variable updates to which the original
  // induction variables map. An original update instruction will be dead if
  // all its users except the induction variable are dead.
  auto *Latch = OrigLoop->getLoopLatch();
  for (auto &Induction : Legal->getInductionVars()) {
    PHINode *Ind = Induction.first;
    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));

    // If the tail is to be folded by masking, the primary induction variable,
    // if it exists, isn't dead: it will be used for masking. Don't kill it.
    if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
      continue;

    if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
          return U == Ind || DeadInstructions.count(cast<Instruction>(U));
        }))
      DeadInstructions.insert(IndUpdate);

    // We record as "Dead" also the type-casting instructions we had identified
    // during induction analysis. We don't need any handling for them in the
    // vectorized loop because we have proven that, under a proper runtime
    // test guarding the vectorized loop, the value of the phi, and the casted
    // value of the phi, are the same. The last instruction in this casting
    // chain will get its scalar/vector/widened def from the
    // scalar/vector/widened def of the respective phi node. Any other casts in
    // the induction def-use chain have no other uses outside the phi update
    // chain, and will be ignored.
    InductionDescriptor &IndDes = Induction.second;
    const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
    DeadInstructions.insert(Casts.begin(), Casts.end());
  }
}

Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }

Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }

Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx,
                                        Value *Step,
                                        Instruction::BinaryOps BinOp) {
  // When unrolling and the VF is 1, we only need to add a simple scalar.
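  // For example (illustrative), with VF = 1, UF = 2 and Step = 1, the second
  // unrolled part receives StartIdx = 1 and computes Val + 1 * Step, i.e. the
  // scalar induction value of the next iteration.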
  Type *Ty = Val->getType();
  assert(!Ty->isVectorTy() && "Val must be a scalar");

  if (Ty->isFloatingPointTy()) {
    // Floating-point operations inherit FMF via the builder's flags.
    Value *MulOp = Builder.CreateFMul(StartIdx, Step);
    return Builder.CreateBinOp(BinOp, Val, MulOp);
  }
  return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction");
}

static void AddRuntimeUnrollDisableMetaData(Loop *L) {
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  bool IsUnrollMetadata = false;
  MDNode *LoopID = L->getLoopID();
  if (LoopID) {
    // First find existing loop unrolling disable metadata. Keep the flag
    // sticky so a match is not lost when a later operand doesn't match.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
      if (MD) {
        const auto *S = dyn_cast<MDString>(MD->getOperand(0));
        IsUnrollMetadata |=
            S && S->getString().startswith("llvm.loop.unroll.disable");
      }
      MDs.push_back(LoopID->getOperand(i));
    }
  }

  if (!IsUnrollMetadata) {
    // Add runtime unroll disable metadata.
    LLVMContext &Context = L->getHeader()->getContext();
    SmallVector<Metadata *, 1> DisableOperands;
    DisableOperands.push_back(
        MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
    MDNode *DisableNode = MDNode::get(Context, DisableOperands);
    MDs.push_back(DisableNode);
    MDNode *NewLoopID = MDNode::get(Context, MDs);
    // Set operand 0 to refer to the loop id itself.
    NewLoopID->replaceOperandWith(0, NewLoopID);
    L->setLoopID(NewLoopID);
  }
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerMainLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("");

  // Generate the code to check the minimum iteration count of the vector
  // epilogue (see below).
  EPI.EpilogueIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
  EPI.EpilogueIterationCountCheck->setName("iter.check");

  // Generate the code to check any assumptions that we've made for SCEV
  // expressions.
  EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);

  // Generate the code that checks at runtime if arrays overlap. We put the
  // checks into a separate block to make the more common case of few elements
  // faster.
  EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);

  // Generate the iteration count check for the main loop, *after* the check
  // for the epilogue loop, so that the path-length is shorter for the case
  // that goes directly through the vector epilogue. The longer path length for
  // the main loop is compensated for by the gain from vectorizing the larger
  // trip count. Note: the branch will get updated later on when we vectorize
  // the epilogue.
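  // A rough sketch of the checks emitted here (illustrative; as noted above,
  // the bypass target of the main-loop check is only retargeted to the vector
  // epilogue's preheader in the second pass):
  //   iter.check:                  trip count < EpilogueVF * EpilogueUF
  //                                -> bypass straight to the scalar loop.
  //   vector.main.loop.iter.check: trip count < MainVF * MainUF
  //                                -> skip the main vector loop.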
  EPI.MainLoopIterationCountCheck =
      emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Type *IdxTy = Legal->getWidestInductionType();
  Value *StartIdx = ConstantInt::get(IdxTy, 0);

  IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt());
  Value *Step = getRuntimeVF(B, IdxTy, VF * UF);
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  EPI.VectorTripCount = CountRoundDown;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Skip induction resume value creation here because the resume values will
  // be created in the second pass. If we created them here, they wouldn't be
  // used anyway, because the vplan in the second pass still contains the
  // inductions from the original loop.

  return completeLoopSkeleton(Lp, OrigLoopID);
}

void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
           << "Main Loop VF:" << EPI.MainLoopVF
           << ", Main Loop UF:" << EPI.MainLoopUF
           << ", Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "intermediate fn:\n" << *Induction->getFunction() << "\n";
  });
}

BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
    Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
  assert(L && "Expected valid Loop.");
  assert(Bypass && "Expected valid bypass basic block.");
  ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
  unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
  Value *Count = getOrCreateTripCount(L);
  // Reuse existing vector loop preheader for TC checks.
  // Note that new preheader block is generated for vector loop.
  BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
  IRBuilder<> Builder(TCCheckBlock->getTerminator());

  // Generate code to check if the loop's trip count is less than VFactor *
  // UFactor of the corresponding (main or epilogue) vector loop.
  auto P = Cost->requiresScalarEpilogue(VFactor) ? ICmpInst::ICMP_ULE
                                                 : ICmpInst::ICMP_ULT;

  Value *CheckMinIters = Builder.CreateICmp(
      P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
      "min.iters.check");

  if (!ForEpilogue)
    TCCheckBlock->setName("vector.main.loop.iter.check");

  // Create new preheader for vector loop.
  LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
                                   DT, LI, nullptr, "vector.ph");

  if (ForEpilogue) {
    assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
                                 DT->getNode(Bypass)->getIDom()) &&
           "TC check is expected to dominate Bypass");

    // Update dominator for Bypass & LoopExit.
    DT->changeImmediateDominator(Bypass, TCCheckBlock);
    if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
      // For loops with multiple exits, there's no edge from the middle block
      // to exit blocks (as the epilogue must run) and thus no need to update
      // the immediate dominator of the exit blocks.
      DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);

    LoopBypassBlocks.push_back(TCCheckBlock);

    // Save the trip count so we don't have to regenerate it in the
    // vec.epilog.iter.check. This is safe to do because the trip count
    // generated here dominates the vector epilog iter check.
    EPI.TripCount = Count;
  }

  ReplaceInstWithInst(
      TCCheckBlock->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  return TCCheckBlock;
}

//===--------------------------------------------------------------------===//
// EpilogueVectorizerEpilogueLoop
//===--------------------------------------------------------------------===//

/// This function is partially responsible for generating the control flow
/// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
BasicBlock *
EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
  MDNode *OrigLoopID = OrigLoop->getLoopID();
  Loop *Lp = createVectorLoopSkeleton("vec.epilog.");

  // Now, compare the remaining count; if there aren't enough iterations to
  // execute the vectorized epilogue, skip to the scalar part.
  BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
  VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
  LoopVectorPreHeader =
      SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
                 LI, nullptr, "vec.epilog.ph");
  emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
                                          VecEpilogueIterationCountCheck);

  // Adjust the control flow taking the state info from the main loop
  // vectorization into account.
  assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
         "expected this to be saved from the previous pass.");
  EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopVectorPreHeader);

  DT->changeImmediateDominator(LoopVectorPreHeader,
                               EPI.MainLoopIterationCountCheck);

  EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
      VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  if (EPI.SCEVSafetyCheck)
    EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);
  if (EPI.MemSafetyCheck)
    EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
        VecEpilogueIterationCountCheck, LoopScalarPreHeader);

  DT->changeImmediateDominator(
      VecEpilogueIterationCountCheck,
      VecEpilogueIterationCountCheck->getSinglePredecessor());

  DT->changeImmediateDominator(LoopScalarPreHeader,
                               EPI.EpilogueIterationCountCheck);
  if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
    // If there is an epilogue which must run, there's no edge from the
    // middle block to exit blocks and thus no need to update the immediate
    // dominator of the exit blocks; update it only when the scalar epilogue
    // is optional.
    DT->changeImmediateDominator(LoopExitBlock,
                                 EPI.EpilogueIterationCountCheck);

  // Keep track of bypass blocks, as they feed start values to the induction
  // phis in the scalar loop preheader.
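  // For instance (illustrative), the scalar IV's resume phi takes the vector
  // trip count when arriving from a vector loop, and the original start value
  // (e.g. 0) when arriving from one of the bypass checks.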
  if (EPI.SCEVSafetyCheck)
    LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
  if (EPI.MemSafetyCheck)
    LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
  LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);

  // Generate a resume induction for the vector epilogue and put it in the
  // vector epilogue preheader.
  Type *IdxTy = Legal->getWidestInductionType();
  PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
                                         LoopVectorPreHeader->getFirstNonPHI());
  EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
  EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
                           EPI.MainLoopIterationCountCheck);

  // Generate the induction variable.
  OldInduction = Legal->getPrimaryInduction();
  Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
  Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
  Value *StartIdx = EPResumeVal;
  Induction =
      createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
                              getDebugLocFromInstOrOperands(OldInduction));

  // Generate induction resume values. These variables save the new starting
  // indexes for the scalar loop. They are used to test if there are any tail
  // iterations left once the vector loop has completed.
  // Note that when the vectorized epilogue is skipped due to the iteration
  // count check, the resume value for the induction variable comes from the
  // trip count of the main vector loop, hence passing the AdditionalBypass
  // argument.
  createInductionResumeValues(Lp, CountRoundDown,
                              {VecEpilogueIterationCountCheck,
                               EPI.VectorTripCount} /* AdditionalBypass */);

  AddRuntimeUnrollDisableMetaData(Lp);
  return completeLoopSkeleton(Lp, OrigLoopID);
}

BasicBlock *
EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
    Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {

  assert(EPI.TripCount &&
         "Expected trip count to have been saved in the first pass.");
  assert(
      (!isa<Instruction>(EPI.TripCount) ||
       DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
      "saved trip count does not dominate insertion point.");
  Value *TC = EPI.TripCount;
  IRBuilder<> Builder(Insert->getTerminator());
  Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");

  // Generate code to check if the loop's trip count is less than VF * UF of
  // the vector epilogue loop.
  auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ? ICmpInst::ICMP_ULE
                                                        : ICmpInst::ICMP_ULT;

  Value *CheckMinIters =
      Builder.CreateICmp(P, Count,
                         createStepForVF(Builder, Count->getType(),
                                         EPI.EpilogueVF, EPI.EpilogueUF),
                         "min.epilog.iters.check");

  ReplaceInstWithInst(
      Insert->getTerminator(),
      BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));

  LoopBypassBlocks.push_back(Insert);
  return Insert;
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
  LLVM_DEBUG({
    dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
           << "Epilogue Loop VF:" << EPI.EpilogueVF
           << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
  });
}

void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
  DEBUG_WITH_TYPE(VerboseDebug, {
    dbgs() << "final fn:\n" << *Induction->getFunction() << "\n";
  });
}

bool LoopVectorizationPlanner::getDecisionAndClampRange(
    const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
  assert(!Range.isEmpty() && "Trying to test an empty VF range.");
  bool PredicateAtRangeStart = Predicate(Range.Start);

  for (ElementCount TmpVF = Range.Start * 2;
       ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
    if (Predicate(TmpVF) != PredicateAtRangeStart) {
      Range.End = TmpVF;
      break;
    }

  return PredicateAtRangeStart;
}

/// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
/// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
/// of VF's starting at a given VF and extending it as much as possible. Each
/// vectorization decision can potentially shorten this sub-range during
/// buildVPlan().
void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
                                           ElementCount MaxVF) {
  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(buildVPlan(SubRange));
    VF = SubRange.End;
  }
}

VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
                                         VPlanPtr &Plan) {
  assert(is_contained(predecessors(Dst), Src) && "Invalid edge");

  // Look for cached value.
  std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
  EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
  if (ECEntryIt != EdgeMaskCache.end())
    return ECEntryIt->second;

  VPValue *SrcMask = createBlockInMask(Src, Plan);

  // The terminator has to be a branch inst!
  BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
  assert(BI && "Unexpected terminator found");

  if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
    return EdgeMaskCache[Edge] = SrcMask;

  // If source is an exiting block, we know the exit edge is dynamically dead
  // in the vector loop, and thus we don't need to restrict the mask. Avoid
  // adding uses of an otherwise potentially dead instruction.
  if (OrigLoop->isLoopExiting(Src))
    return EdgeMaskCache[Edge] = SrcMask;

  VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
  assert(EdgeMask && "No Edge Mask found for condition");

  if (BI->getSuccessor(0) != Dst)
    EdgeMask = Builder.createNot(EdgeMask);

  if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
    // The condition is 'SrcMask && EdgeMask', which is equivalent to
    // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
    // The select version does not introduce new UB if SrcMask is false and
    // EdgeMask is poison. Using 'and' here introduces undefined behavior.
    VPValue *False = Plan->getOrAddVPValue(
        ConstantInt::getFalse(BI->getCondition()->getType()));
    EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
  }

  return EdgeMaskCache[Edge] = EdgeMask;
}

VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
  assert(OrigLoop->contains(BB) && "Block is not a part of a loop");

  // Look for cached value.
  BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
  if (BCEntryIt != BlockMaskCache.end())
    return BCEntryIt->second;

  // All-one mask is modelled as no-mask following the convention for masked
  // load/store/gather/scatter. Initialize BlockMask to no-mask.
  VPValue *BlockMask = nullptr;

  if (OrigLoop->getHeader() == BB) {
    if (!CM.blockNeedsPredicationForAnyReason(BB))
      return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.

    // Create the block in mask as the first non-phi instruction in the block.
    VPBuilder::InsertPointGuard Guard(Builder);
    auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
    Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);

    // Introduce the early-exit compare IV <= BTC to form the header block
    // mask. This is used instead of IV < TC because TC may wrap, unlike BTC.
    // Start by constructing the desired canonical IV.
    VPValue *IV = nullptr;
    if (Legal->getPrimaryInduction())
      IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
    else {
      auto *IVRecipe = new VPWidenCanonicalIVRecipe();
      Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
      IV = IVRecipe;
    }
    VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
    bool TailFolded = !CM.isScalarEpilogueAllowed();

    if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
      // While ActiveLaneMask is a binary op that consumes the loop tripcount
      // as a second argument, we only pass the IV here and extract the
      // tripcount from the transform state where codegen of the VP
      // instructions happens.
      BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
    } else {
      BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
    }
    return BlockMaskCache[BB] = BlockMask;
  }

  // This is the block mask. We OR all incoming edges.
  for (auto *Predecessor : predecessors(BB)) {
    VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
    if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
      return BlockMaskCache[BB] = EdgeMask;

    if (!BlockMask) { // BlockMask still has its initial nullptr value.
      BlockMask = EdgeMask;
      continue;
    }

    BlockMask = Builder.createOr(BlockMask, EdgeMask);
  }

  return BlockMaskCache[BB] = BlockMask;
}

VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
                                                ArrayRef<VPValue *> Operands,
                                                VFRange &Range,
                                                VPlanPtr &Plan) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Must be called with either a load or store");

  auto willWiden = [&](ElementCount VF) -> bool {
    if (VF.isScalar())
      return false;
    LoopVectorizationCostModel::InstWidening Decision =
        CM.getWideningDecision(I, VF);
    assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
           "CM decision should be taken at this point.");
    if (Decision == LoopVectorizationCostModel::CM_Interleave)
      return true;
    if (CM.isScalarAfterVectorization(I, VF) ||
        CM.isProfitableToScalarize(I, VF))
      return false;
    return Decision != LoopVectorizationCostModel::CM_Scalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  VPValue *Mask = nullptr;
  if (Legal->isMaskRequired(I))
    Mask = createBlockInMask(I->getParent(), Plan);

  // Determine if the pointer operand of the access is either consecutive or
  // reverse consecutive.
  LoopVectorizationCostModel::InstWidening Decision =
      CM.getWideningDecision(I, Range.Start);
  bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
  bool Consecutive =
      Reverse || Decision == LoopVectorizationCostModel::CM_Widen;

  if (LoadInst *Load = dyn_cast<LoadInst>(I))
    return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
                                              Consecutive, Reverse);

  StoreInst *Store = cast<StoreInst>(I);
  return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
                                            Mask, Consecutive, Reverse);
}

VPWidenIntOrFpInductionRecipe *
VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
                                           ArrayRef<VPValue *> Operands) const {
  // Check if this is an integer or fp induction. If so, build the recipe that
  // produces its scalar and vector values.
  InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
  if (II.getKind() == InductionDescriptor::IK_IntInduction ||
      II.getKind() == InductionDescriptor::IK_FpInduction) {
    assert(II.getStartValue() ==
           Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
    const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
    return new VPWidenIntOrFpInductionRecipe(
        Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
  }

  return nullptr;
}

VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
    TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
    VPlan &Plan) const {
  // Optimize the special case where the source is a constant integer
  // induction variable. Notice that we can only optimize the 'trunc' case
  // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
  // (c) other casts depend on pointer size.

  // Determine whether \p K is a truncation based on an induction variable that
  // can be optimized.
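  // For example (illustrative IR), in
  //   %iv = phi i64 [ 0, %ph ], [ %iv.next, %latch ]
  //   %t = trunc i64 %iv to i32
  // the trunc can be folded away by widening the induction directly as i32,
  // rather than widening the i64 induction and truncating every element.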
  auto isOptimizableIVTruncate =
      [&](Instruction *K) -> std::function<bool(ElementCount)> {
    return [=](ElementCount VF) -> bool {
      return CM.isOptimizableIVTruncate(K, VF);
    };
  };

  if (LoopVectorizationPlanner::getDecisionAndClampRange(
          isOptimizableIVTruncate(I), Range)) {

    InductionDescriptor II =
        Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
    VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
    return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
                                             Start, nullptr, I);
  }
  return nullptr;
}

VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
                                                ArrayRef<VPValue *> Operands,
                                                VPlanPtr &Plan) {
  // If all incoming values are equal, the incoming VPValue can be used directly
  // instead of creating a new VPBlendRecipe.
  VPValue *FirstIncoming = Operands[0];
  if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
        return FirstIncoming == Inc;
      })) {
    return Operands[0];
  }

  // We know that all PHIs in non-header blocks are converted into selects, so
  // we don't have to worry about the insertion order and we can just use the
  // builder. At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.
  SmallVector<VPValue *, 2> OperandsWithMask;
  unsigned NumIncoming = Phi->getNumIncomingValues();

  for (unsigned In = 0; In < NumIncoming; In++) {
    VPValue *EdgeMask =
        createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
    assert((EdgeMask || NumIncoming == 1) &&
           "Multiple predecessors with one having a full mask");
    OperandsWithMask.push_back(Operands[In]);
    if (EdgeMask)
      OperandsWithMask.push_back(EdgeMask);
  }
  return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
}

VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
                                                   ArrayRef<VPValue *> Operands,
                                                   VFRange &Range) const {

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
      Range);

  if (IsPredicated)
    return nullptr;

  Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
  if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
             ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
             ID == Intrinsic::pseudoprobe ||
             ID == Intrinsic::experimental_noalias_scope_decl))
    return nullptr;

  auto willWiden = [&](ElementCount VF) -> bool {
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    // The following case may be scalarized depending on the VF.
    // The flag shows whether we use an intrinsic or a usual call for the
    // vectorized version of the instruction.
    // Is it beneficial to perform an intrinsic call compared to a lib call?
    bool NeedToScalarize = false;
    InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
    InstructionCost IntrinsicCost =
        ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
    bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
    return UseVectorIntrinsic || !NeedToScalarize;
  };

  if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
    return nullptr;

  ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
  return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
}

bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
  assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
         !isa<StoreInst>(I) && "Instruction should have been handled earlier");
  // Instruction should be widened, unless it is scalar after vectorization,
  // scalarization is profitable or it is predicated.
  auto WillScalarize = [this, I](ElementCount VF) -> bool {
    return CM.isScalarAfterVectorization(I, VF) ||
           CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
  };
  return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
                                                             Range);
}

VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
                                           ArrayRef<VPValue *> Operands) const {
  auto IsVectorizableOpcode = [](unsigned Opcode) {
    switch (Opcode) {
    case Instruction::Add:
    case Instruction::And:
    case Instruction::AShr:
    case Instruction::BitCast:
    case Instruction::FAdd:
    case Instruction::FCmp:
    case Instruction::FDiv:
    case Instruction::FMul:
    case Instruction::FNeg:
    case Instruction::FPExt:
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::FPTrunc:
    case Instruction::FRem:
    case Instruction::FSub:
    case Instruction::ICmp:
    case Instruction::IntToPtr:
    case Instruction::LShr:
    case Instruction::Mul:
    case Instruction::Or:
    case Instruction::PtrToInt:
    case Instruction::SDiv:
    case Instruction::Select:
    case Instruction::SExt:
    case Instruction::Shl:
    case Instruction::SIToFP:
    case Instruction::SRem:
    case Instruction::Sub:
    case Instruction::Trunc:
    case Instruction::UDiv:
    case Instruction::UIToFP:
    case Instruction::URem:
    case Instruction::Xor:
    case Instruction::ZExt:
      return true;
    }
    return false;
  };

  if (!IsVectorizableOpcode(I->getOpcode()))
    return nullptr;

  // Success: widen this instruction.
  return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
}

void VPRecipeBuilder::fixHeaderPhis() {
  BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
  for (VPWidenPHIRecipe *R : PhisToFix) {
    auto *PN = cast<PHINode>(R->getUnderlyingValue());
    VPRecipeBase *IncR =
        getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
    R->addOperand(IncR->getVPSingleValue());
  }
}

VPBasicBlock *VPRecipeBuilder::handleReplication(Instruction *I, VFRange &Range,
                                                 VPBasicBlock *VPBB,
                                                 VPlanPtr &Plan) {
  bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
      Range);

  bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
      [&](ElementCount VF) { return CM.isPredicatedInst(I); }, Range);

  // Even if the instruction is not marked as uniform, there are certain
  // intrinsic calls that can be effectively treated as such, so we check for
  // them here.
  // Conservatively, we only do this for scalable vectors, since for
  // fixed-width VFs we can always fall back on full scalarization.
  if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
    switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
    case Intrinsic::assume:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // For scalable vectors if one of the operands is variant then we still
      // want to mark as uniform, which will generate one instruction for just
      // the first lane of the vector. We can't scalarize the call in the same
      // way as for fixed-width vectors because we don't know how many lanes
      // there are.
      //
      // The reasons for doing it this way for scalable vectors are:
      // 1. For the assume intrinsic generating the instruction for the first
      //    lane is still better than not generating any at all. For
      //    example, the input may be a splat across all lanes.
      // 2. For the lifetime start/end intrinsics the pointer operand only
      //    does anything useful when the input comes from a stack object,
      //    which suggests it should always be uniform. For non-stack objects
      //    the effect is to poison the object, which still allows us to
      //    remove the call.
      IsUniform = true;
      break;
    default:
      break;
    }
  }

  auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
                                       IsUniform, IsPredicated);
  setRecipe(I, Recipe);
  Plan->addVPValue(I, Recipe);

  // Find if I uses a predicated instruction. If so, it will use its scalar
  // value. Avoid hoisting the insert-element which packs the scalar value into
  // a vector value, as that happens iff all users use the vector value.
  for (VPValue *Op : Recipe->operands()) {
    auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
    if (!PredR)
      continue;
    auto *RepR =
        cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
    assert(RepR->isPredicated() &&
           "expected Replicate recipe to be predicated");
    RepR->setAlsoPack(false);
  }

  // Finalize the recipe for Instr, first if it is not predicated.
  if (!IsPredicated) {
    LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
    VPBB->appendRecipe(Recipe);
    return VPBB;
  }
  LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
  assert(VPBB->getSuccessors().empty() &&
         "VPBB has successors when handling predicated replication.");
  // Record predicated instructions for above packing optimizations.
  VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
  VPBlockUtils::insertBlockAfter(Region, VPBB);
  auto *RegSucc = new VPBasicBlock();
  VPBlockUtils::insertBlockAfter(RegSucc, Region);
  return RegSucc;
}

VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
                                                      VPRecipeBase *PredRecipe,
                                                      VPlanPtr &Plan) {
  // Instructions marked for predication are replicated and placed under an
  // if-then construct to prevent side-effects.

  // Generate recipes to compute the block mask for this region.
  VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);

  // Build the triangular if-then region.
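  // Its shape is (illustrative, using the names created below):
  //   pred.<opcode>.entry:    branch-on-mask on BlockInMask
  //   pred.<opcode>.if:       the replicated, predicated recipe
  //   pred.<opcode>.continue: phi recipe making the result available outside
  //                           the region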
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
  auto *PHIRecipe = Instr->getType()->isVoidTy()
                        ? nullptr
                        : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
  if (PHIRecipe) {
    Plan->removeVPValueFor(Instr);
    Plan->addVPValue(Instr, PHIRecipe);
  }
  auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
  VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
  VPBlockUtils::connectBlocks(Pred, Exit);

  return Region;
}

VPRecipeOrVPValueTy
VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
                                        ArrayRef<VPValue *> Operands,
                                        VFRange &Range, VPlanPtr &Plan) {
  // First, check for specific widening recipes that deal with calls, memory
  // operations, inductions and Phi nodes.
  if (auto *CI = dyn_cast<CallInst>(Instr))
    return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));

  if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
    return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));

  VPRecipeBase *Recipe;
  if (auto Phi = dyn_cast<PHINode>(Instr)) {
    if (Phi->getParent() != OrigLoop->getHeader())
      return tryToBlend(Phi, Operands, Plan);
    if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
      return toVPRecipeResult(Recipe);

    VPWidenPHIRecipe *PhiRecipe = nullptr;
    if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
      VPValue *StartV = Operands[0];
      if (Legal->isReductionVariable(Phi)) {
        RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
        assert(RdxDesc.getRecurrenceStartValue() ==
               Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
        PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
                                             CM.isInLoopReduction(Phi),
                                             CM.useOrderedReductions(RdxDesc));
      } else {
        PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
      }

      // Record the incoming value from the backedge, so we can add the incoming
      // value from the backedge after all recipes have been created.
      recordRecipeOf(cast<Instruction>(
          Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
      PhisToFix.push_back(PhiRecipe);
    } else {
      // TODO: record start and backedge value for remaining pointer induction
      // phis.
      assert(Phi->getType()->isPointerTy() &&
             "only pointer phis should be handled here");
      PhiRecipe = new VPWidenPHIRecipe(Phi);
    }

    return toVPRecipeResult(PhiRecipe);
  }

  if (isa<TruncInst>(Instr) &&
      (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
                                               Range, *Plan)))
    return toVPRecipeResult(Recipe);

  if (!shouldWiden(Instr, Range))
    return nullptr;

  if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
    return toVPRecipeResult(new VPWidenGEPRecipe(
        GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));

  if (auto *SI = dyn_cast<SelectInst>(Instr)) {
    bool InvariantCond =
        PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
    return toVPRecipeResult(new VPWidenSelectRecipe(
        *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
  }

  return toVPRecipeResult(tryToWiden(Instr, Operands));
}

void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
                                                        ElementCount MaxVF) {
  assert(OrigLoop->isInnermost() && "Inner loop expected.");

  // Collect instructions from the original loop that will become trivially dead
  // in the vectorized loop. We don't need to vectorize these instructions. For
  // example, original induction update instructions can become dead because we
  // separately emit induction "steps" when generating code for the new loop.
  // Similarly, we create a new latch condition when setting up the structure
  // of the new loop, so the old one can become dead.
  SmallPtrSet<Instruction *, 4> DeadInstructions;
  collectTriviallyDeadInstructions(DeadInstructions);

  // Add assume instructions we need to drop to DeadInstructions, to prevent
  // them from being added to the VPlan.
  // TODO: We only need to drop assumes in blocks that get flattened. If the
  // control flow is preserved, we should keep them.
  auto &ConditionalAssumes = Legal->getConditionalAssumes();
  DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());

  MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
  // Dead instructions do not need sinking. Remove them from SinkAfter.
  for (Instruction *I : DeadInstructions)
    SinkAfter.erase(I);

  // Cannot sink instructions after dead instructions (there won't be any
  // recipes for them). Instead, find the first non-dead previous instruction.
  for (auto &P : Legal->getSinkAfter()) {
    Instruction *SinkTarget = P.second;
    Instruction *FirstInst = &*SinkTarget->getParent()->begin();
    (void)FirstInst;
    while (DeadInstructions.contains(SinkTarget)) {
      assert(
          SinkTarget != FirstInst &&
          "Must find a live instruction (at least the one feeding the "
          "first-order recurrence PHI) before reaching beginning of the block");
      SinkTarget = SinkTarget->getPrevNode();
      assert(SinkTarget != P.first &&
             "sink source equals target, no sinking required");
    }
    P.second = SinkTarget;
  }

  auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
  for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
    VFRange SubRange = {VF, MaxVFPlusOne};
    VPlans.push_back(
        buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
    VF = SubRange.End;
  }
}

VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
    VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
    const MapVector<Instruction *, Instruction *> &SinkAfter) {

  SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;

  VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);

  // ---------------------------------------------------------------------------
  // Pre-construction: record ingredients whose recipes we'll need to further
  // process after constructing the initial VPlan.
  // ---------------------------------------------------------------------------

  // Mark instructions we'll need to sink later and their targets as
  // ingredients whose recipe we'll need to record.
  for (auto &Entry : SinkAfter) {
    RecipeBuilder.recordRecipeOf(Entry.first);
    RecipeBuilder.recordRecipeOf(Entry.second);
  }
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    RecipeBuilder.recordRecipeOf(Phi);
    for (auto &R : ReductionOperations) {
      RecipeBuilder.recordRecipeOf(R);
      // For min/max reductions, where we have a pair of icmp/select, we also
      // need to record the ICmp recipe, so it can be removed later.
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
        RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
    }
  }

  // For each interleave group which is relevant for this (possibly trimmed)
  // Range, add it to the set of groups to be later applied to the VPlan and add
  // placeholders for its members' Recipes which we'll be replacing with a
  // single VPInterleaveRecipe.
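  // For example (illustrative), the two strided loads in
  //   a = A[2*i]; b = A[2*i+1];
  // form a group of factor 2; both member recipes are recorded here and later
  // folded into one VPInterleaveRecipe.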
  for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
    auto applyIG = [IG, this](ElementCount VF) -> bool {
      return (VF.isVector() && // Query is illegal for VF == 1
              CM.getWideningDecision(IG->getInsertPos(), VF) ==
                  LoopVectorizationCostModel::CM_Interleave);
    };
    if (!getDecisionAndClampRange(applyIG, Range))
      continue;
    InterleaveGroups.insert(IG);
    for (unsigned i = 0; i < IG->getFactor(); i++)
      if (Instruction *Member = IG->getMember(i))
        RecipeBuilder.recordRecipeOf(Member);
  }

  // ---------------------------------------------------------------------------
  // Build initial VPlan: Scan the body of the loop in a topological order to
  // visit each basic block after having visited its predecessor basic blocks.
  // ---------------------------------------------------------------------------

  auto Plan = std::make_unique<VPlan>();

  // Scan the body of the loop in a topological order to visit each basic block
  // after having visited its predecessor basic blocks.
  LoopBlocksDFS DFS(OrigLoop);
  DFS.perform(LI);

  VPBasicBlock *VPBB = nullptr;
  VPBasicBlock *HeaderVPBB = nullptr;
  SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
    // Relevant instructions from basic block BB will be grouped into VPRecipe
    // ingredients and fill a new VPBasicBlock.
    unsigned VPBBsForBB = 0;
    auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
    if (VPBB)
      VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
    else {
      Plan->setEntry(FirstVPBBForBB);
      HeaderVPBB = FirstVPBBForBB;
    }
    VPBB = FirstVPBBForBB;
    Builder.setInsertPoint(VPBB);

    // Introduce each ingredient into VPlan.
    // TODO: Model and preserve debug intrinsics in VPlan.
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      Instruction *Instr = &I;

      // First filter out irrelevant instructions, to ensure no recipes are
      // built for them.
      if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
        continue;

      SmallVector<VPValue *, 4> Operands;
      auto *Phi = dyn_cast<PHINode>(Instr);
      if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
        Operands.push_back(Plan->getOrAddVPValue(
            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
      } else {
        auto OpRange = Plan->mapToVPValues(Instr->operands());
        Operands = {OpRange.begin(), OpRange.end()};
      }
      if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
              Instr, Operands, Range, Plan)) {
        // If Instr can be simplified to an existing VPValue, use it.
        if (RecipeOrValue.is<VPValue *>()) {
          auto *VPV = RecipeOrValue.get<VPValue *>();
          Plan->addVPValue(Instr, VPV);
          // If the re-used value is a recipe, register the recipe for the
          // instruction, in case the recipe for Instr needs to be recorded.
          if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
            RecipeBuilder.setRecipe(Instr, R);
          continue;
        }
        // Otherwise, add the new recipe.
        VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
        for (auto *Def : Recipe->definedValues()) {
          auto *UV = Def->getUnderlyingValue();
          Plan->addVPValue(UV, Def);
        }

        if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
            HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
          // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
          // of the header block. That can happen for truncates of induction
          // variables. Those recipes are moved to the phi section of the header
          // block after applying SinkAfter, which relies on the original
          // position of the trunc.
          assert(isa<TruncInst>(Instr));
          InductionsToMove.push_back(
              cast<VPWidenIntOrFpInductionRecipe>(Recipe));
        }
        RecipeBuilder.setRecipe(Instr, Recipe);
        VPBB->appendRecipe(Recipe);
        continue;
      }

      // Otherwise, if all widening options failed, Instruction is to be
      // replicated. This may create a successor for VPBB.
      VPBasicBlock *NextVPBB =
          RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
      if (NextVPBB != VPBB) {
        VPBB = NextVPBB;
        VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
                                    : "");
      }
    }
  }

  assert(isa<VPBasicBlock>(Plan->getEntry()) &&
         !Plan->getEntry()->getEntryBasicBlock()->empty() &&
         "entry block must be set to a non-empty VPBasicBlock");
  RecipeBuilder.fixHeaderPhis();

  // ---------------------------------------------------------------------------
  // Transform initial VPlan: Apply previously taken decisions, in order, to
  // bring the VPlan to its final state.
  // ---------------------------------------------------------------------------

  // Apply Sink-After legal constraints.
  auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
    auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
    if (Region && Region->isReplicator()) {
      assert(Region->getNumSuccessors() == 1 &&
             Region->getNumPredecessors() == 1 && "Expected SESE region!");
      assert(R->getParent()->size() == 1 &&
             "A recipe in an original replicator region must be the only "
             "recipe in its block");
      return Region;
    }
    return nullptr;
  };
  for (auto &Entry : SinkAfter) {
    VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
    VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);

    auto *TargetRegion = GetReplicateRegion(Target);
    auto *SinkRegion = GetReplicateRegion(Sink);
    if (!SinkRegion) {
      // If the sink source is not a replicate region, sink the recipe directly.
      if (TargetRegion) {
        // The target is in a replication region, make sure to move Sink to
        // the block after it, not into the replication region itself.
        VPBasicBlock *NextBlock =
            cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
        Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
      } else
        Sink->moveAfter(Target);
      continue;
    }

    // The sink source is in a replicate region. Unhook the region from the CFG.
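    // I.e. (illustrative), rewire  SinkPred -> SinkRegion -> SinkSucc  into
    // SinkPred -> SinkSucc, then re-insert SinkRegion after the target below.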
    auto *SinkPred = SinkRegion->getSinglePredecessor();
    auto *SinkSucc = SinkRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
    VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
    VPBlockUtils::connectBlocks(SinkPred, SinkSucc);

    if (TargetRegion) {
      // The target recipe is also in a replicate region, move the sink region
      // after the target region.
      auto *TargetSucc = TargetRegion->getSingleSuccessor();
      VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
      VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
      VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
    } else {
      // The sink source is in a replicate region; we need to move the whole
      // replicate region, which should only contain a single recipe in the
      // main block.
      auto *SplitBlock =
          Target->getParent()->splitAt(std::next(Target->getIterator()));

      auto *SplitPred = SplitBlock->getSinglePredecessor();

      VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
      VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
      VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
      if (VPBB == SplitPred)
        VPBB = SplitBlock;
    }
  }

  // Now that sink-after is done, move induction recipes for optimized truncates
  // to the phi section of the header block.
  for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
    Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());

  // Adjust the recipes for any inloop reductions.
  adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start);

  // Introduce a recipe to combine the incoming and previous values of a
  // first-order recurrence.
  for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
    auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
    if (!RecurPhi)
      continue;

    VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
    VPBasicBlock *InsertBlock = PrevRecipe->getParent();
    auto *Region = GetReplicateRegion(PrevRecipe);
    if (Region)
      InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
    if (Region || PrevRecipe->isPhi())
      Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                             {RecurPhi, RecurPhi->getBackedgeValue()}));

    RecurPhi->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to RecurPhi again, after replacing
    // all users.
    RecurSplice->setOperand(0, RecurPhi);
  }

  // Interleave memory: for each Interleave Group we marked earlier as relevant
  // for this VPlan, replace the Recipes widening its memory instructions with a
  // single VPInterleaveRecipe at its insertion point.
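  // E.g. (illustrative), for the factor-2 load group mentioned above, the two
  // member recipes are erased and the single VPInterleaveRecipe inserted here
  // later emits one wide load whose lanes are de-interleaved with shuffles.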
  for (auto IG : InterleaveGroups) {
    auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
        RecipeBuilder.getRecipe(IG->getInsertPos()));
    SmallVector<VPValue *, 4> StoredValues;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
        auto *StoreR =
            cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
        StoredValues.push_back(StoreR->getStoredValue());
      }

    auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
                                        Recipe->getMask());
    VPIG->insertBefore(Recipe);
    unsigned J = 0;
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *Member = IG->getMember(i)) {
        if (!Member->getType()->isVoidTy()) {
          VPValue *OriginalV = Plan->getVPValue(Member);
          Plan->removeVPValueFor(Member);
          Plan->addVPValue(Member, VPIG->getVPValue(J));
          OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
          J++;
        }
        RecipeBuilder.getRecipe(Member)->eraseFromParent();
      }
  }

  // From this point onwards, VPlan-to-VPlan transformations may change the plan
  // in ways that make accessing values using original IR values incorrect.
  Plan->disableValue2VPValue();

  VPlanTransforms::sinkScalarOperands(*Plan);
  VPlanTransforms::mergeReplicateRegions(*Plan);

  std::string PlanName;
  raw_string_ostream RSO(PlanName);
  ElementCount VF = Range.Start;
  Plan->addVF(VF);
  RSO << "Initial VPlan for VF={" << VF;
  for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
    Plan->addVF(VF);
    RSO << "," << VF;
  }
  RSO << "},UF>=1";
  RSO.flush();
  Plan->setName(PlanName);

  assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
  return Plan;
}

VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
  // Outer loop handling: outer loops may require CFG and instruction-level
  // transformations before even evaluating whether vectorization is profitable.
  // Since we cannot modify the incoming IR, we need to build VPlan upfront in
  // the vectorization pipeline.
  assert(!OrigLoop->isInnermost());
  assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");

  // Create new empty VPlan.
  auto Plan = std::make_unique<VPlan>();

  // Build hierarchical CFG.
  VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
  HCFGBuilder.buildHierarchicalCFG();

  for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
       VF *= 2)
    Plan->addVF(VF);

  if (EnableVPlanPredication) {
    VPlanPredicator VPP(*Plan);
    VPP.predicate();

    // Avoid running transformation to recipes until masked code generation in
    // VPlan-native path is in place.
    return Plan;
  }

  SmallPtrSet<Instruction *, 1> DeadInstructions;
  VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
                                             Legal->getInductionVars(),
                                             DeadInstructions, *PSE.getSE());
  return Plan;
}

// Adjust the recipes for reductions. For in-loop reductions the chain of
// instructions leading from the loop exit instr to the phi needs to be
// converted to reductions, with one operand being vector and the other being
// the scalar reduction chain. For other reductions, a select is introduced
// between the phi and live-out recipes when folding the tail.
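// For example (illustrative), for an in-loop add reduction the widened
//   %sum.next = add <4 x i32> %vec.phi, %val
// is replaced by a reduction recipe computing
//   %sum.next = %sum + reduce.add(%val)
// so the accumulator stays scalar inside the loop.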
void LoopVectorizationPlanner::adjustRecipesForReductions(
    VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
    ElementCount MinVF) {
  for (auto &Reduction : CM.getInLoopReductionChains()) {
    PHINode *Phi = Reduction.first;
    RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
    const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;

    if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
      continue;

    // ReductionOperations are ordered top-down from the phi's use to the
    // LoopExitValue. We keep track of the previous item (the Chain) to tell
    // which of the two operands will remain scalar and which will be reduced.
    // For minmax the chain will be the select instructions.
    Instruction *Chain = Phi;
    for (Instruction *R : ReductionOperations) {
      VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
      RecurKind Kind = RdxDesc.getRecurrenceKind();

      VPValue *ChainOp = Plan->getVPValue(Chain);
      unsigned FirstOpId;
      assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
             "Only min/max recurrences allowed for inloop reductions");
      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
               "Expected to replace a VPWidenSelectSC");
        FirstOpId = 1;
      } else {
        assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe)) &&
               "Expected to replace a VPWidenSC");
        FirstOpId = 0;
      }
      unsigned VecOpId =
          R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
      VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));

      auto *CondOp = CM.foldTailByMasking()
                         ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
                         : nullptr;
      VPReductionRecipe *RedRecipe =
          new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
      WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
      Plan->removeVPValueFor(R);
      Plan->addVPValue(R, RedRecipe);
      WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
      WidenRecipe->eraseFromParent();

      if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
        VPRecipeBase *CompareRecipe =
            RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
        assert(isa<VPWidenRecipe>(CompareRecipe) &&
               "Expected to replace a VPWidenSC");
        assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
               "Expected no remaining users");
        CompareRecipe->eraseFromParent();
      }
      Chain = R;
    }
  }

  // If tail is folded by masking, introduce selects between the phi
  // and the live-out instruction of each reduction, at the end of the latch.
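  // Conceptually (a sketch; the select is created as a VPInstruction below):
  //   %red.final = select <header mask>, %red.backedge, %red.phi
  // so that lanes masked off in the final, partially-active iteration keep
  // the value from the previous iteration.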
  if (CM.foldTailByMasking()) {
    for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
      VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
      if (!PhiR || PhiR->isInLoop())
        continue;
      Builder.setInsertPoint(LatchVPBB);
      VPValue *Cond =
          RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
      VPValue *Red = PhiR->getBackedgeValue();
      Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
    }
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
                               VPSlotTracker &SlotTracker) const {
  O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
  IG->getInsertPos()->printAsOperand(O, false);
  O << ", ";
  getAddr()->printAsOperand(O, SlotTracker);
  VPValue *Mask = getMask();
  if (Mask) {
    O << ", ";
    Mask->printAsOperand(O, SlotTracker);
  }

  unsigned OpIdx = 0;
  for (unsigned i = 0; i < IG->getFactor(); ++i) {
    if (!IG->getMember(i))
      continue;
    if (getNumStoreOperands() > 0) {
      O << "\n" << Indent << "  store ";
      getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
      O << " to index " << i;
    } else {
      O << "\n" << Indent << "  ";
      getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
      O << " = load from index " << i;
    }
    ++OpIdx;
  }
}
#endif

void VPWidenCallRecipe::execute(VPTransformState &State) {
  State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
                                  *this, State);
}

void VPWidenSelectRecipe::execute(VPTransformState &State) {
  State.ILV->widenSelectInstruction(*cast<SelectInst>(getUnderlyingInstr()),
                                    this, *this, InvariantCond, State);
}

void VPWidenRecipe::execute(VPTransformState &State) {
  State.ILV->widenInstruction(*getUnderlyingInstr(), this, *this, State);
}

void VPWidenGEPRecipe::execute(VPTransformState &State) {
  State.ILV->widenGEP(cast<GetElementPtrInst>(getUnderlyingInstr()), this,
                      *this, State.UF, State.VF, IsPtrLoopInvariant,
                      IsIndexLoopInvariant, State);
}

void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Int or FP induction being replicated.");
  State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
                                   getTruncInst(), getVPValue(0),
                                   getCastValue(), State);
}

void VPWidenPHIRecipe::execute(VPTransformState &State) {
  State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
                                 State);
}

void VPBlendRecipe::execute(VPTransformState &State) {
  State.ILV->setDebugLocFromInst(Phi, &State.Builder);
  // We know that all PHIs in non-header blocks are converted into
  // selects, so we don't have to worry about the insertion order and we
  // can just use the builder.
  // At this point we generate the predication tree. There may be
  // duplications since this is a simple recursive scan, but future
  // optimizations will clean it up.

  unsigned NumIncoming = getNumIncomingValues();

  // Generate a sequence of selects of the form:
  // SELECT(Mask3, In3,
  //        SELECT(Mask2, In2,
  //               SELECT(Mask1, In1,
  //                      In0)))
  // Note that Mask0 is never used: lanes for which no path reaches this phi
  // are essentially undef and are taken from In0.
  InnerLoopVectorizer::VectorParts Entry(State.UF);
  for (unsigned In = 0; In < NumIncoming; ++In) {
    for (unsigned Part = 0; Part < State.UF; ++Part) {
      // We might have single edge PHIs (blocks) - use an identity
      // 'select' for the first PHI operand.
      Value *In0 = State.get(getIncomingValue(In), Part);
      if (In == 0)
        Entry[Part] = In0; // Initialize with the first incoming value.
      else {
        // Select between the current value and the previous incoming edge
        // based on the incoming mask.
        Value *Cond = State.get(getMask(In), Part);
        Entry[Part] =
            State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
      }
    }
  }
  for (unsigned Part = 0; Part < State.UF; ++Part)
    State.set(this, Entry[Part], Part);
}

void VPInterleaveRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Interleave group being replicated.");
  State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
                                      getStoredValues(), getMask());
}

void VPReductionRecipe::execute(VPTransformState &State) {
  assert(!State.Instance && "Reduction being replicated.");
  Value *PrevInChain = State.get(getChainOp(), 0);
  RecurKind Kind = RdxDesc->getRecurrenceKind();
  bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
  // Propagate the fast-math flags carried by the underlying instruction.
  IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
  State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewVecOp = State.get(getVecOp(), Part);
    if (VPValue *Cond = getCondOp()) {
      Value *NewCond = State.get(Cond, Part);
      VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
      Value *Iden = RdxDesc->getRecurrenceIdentity(
          Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
      Value *IdenVec =
          State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
      Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
      NewVecOp = Select;
    }
    Value *NewRed;
    Value *NextInChain;
    if (IsOrdered) {
      if (State.VF.isVector())
        NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
                                        PrevInChain);
      else
        NewRed = State.Builder.CreateBinOp(
            (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
            NewVecOp);
      PrevInChain = NewRed;
    } else {
      PrevInChain = State.get(getChainOp(), Part);
      NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
    }
    if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
      NextInChain = createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
                                   NewRed, PrevInChain);
    } else if (IsOrdered)
      NextInChain = NewRed;
    else
      NextInChain = State.Builder.CreateBinOp(
          (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
          PrevInChain);
    State.set(this, NextInChain, Part);
  }
}

void VPReplicateRecipe::execute(VPTransformState &State) {
  if (State.Instance) { // Generate a single instance.
    assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
    State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                    *State.Instance, IsPredicated, State);
    // Insert scalar instance packing it into a vector.
    if (AlsoPack && State.VF.isVector()) {
      // If we're constructing lane 0, initialize to start from poison.
      if (State.Instance->Lane.isFirstLane()) {
        assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
        Value *Poison = PoisonValue::get(
            VectorType::get(getUnderlyingValue()->getType(), State.VF));
        State.set(this, Poison, State.Instance->Part);
      }
      State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
    }
    return;
  }

  // Generate scalar instances for all VF lanes of all UF parts, unless the
  // instruction is uniform, in which case generate only the first lane for
  // each of the UF parts.
  unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
  assert((!State.VF.isScalable() || IsUniform) &&
         "Can't scalarize a scalable vector");
  for (unsigned Part = 0; Part < State.UF; ++Part)
    for (unsigned Lane = 0; Lane < EndLane; ++Lane)
      State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *this,
                                      VPIteration(Part, Lane), IsPredicated,
                                      State);
}

void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Branch on Mask works only on single instance.");

  unsigned Part = State.Instance->Part;
  unsigned Lane = State.Instance->Lane.getKnownLane();

  Value *ConditionBit = nullptr;
  VPValue *BlockInMask = getMask();
  if (BlockInMask) {
    ConditionBit = State.get(BlockInMask, Part);
    if (ConditionBit->getType()->isVectorTy())
      ConditionBit = State.Builder.CreateExtractElement(
          ConditionBit, State.Builder.getInt32(Lane));
  } else // Block in mask is all-one.
    ConditionBit = State.Builder.getTrue();

  // Replace the temporary unreachable terminator with a new conditional
  // branch, whose two destinations will be set later when they are created.
  auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
  assert(isa<UnreachableInst>(CurrentTerminator) &&
         "Expected to replace unreachable terminator with conditional branch.");
  auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
  CondBr->setSuccessor(0, nullptr);
  ReplaceInstWithInst(CurrentTerminator, CondBr);
}

void VPPredInstPHIRecipe::execute(VPTransformState &State) {
  assert(State.Instance && "Predicated instruction PHI works per instance.");
  Instruction *ScalarPredInst =
      cast<Instruction>(State.get(getOperand(0), *State.Instance));
  BasicBlock *PredicatedBB = ScalarPredInst->getParent();
  BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
  assert(PredicatingBB && "Predicated block has no single predecessor.");
  assert(isa<VPReplicateRecipe>(getOperand(0)) &&
         "operand must be VPReplicateRecipe");

  // By current pack/unpack logic we need to generate only a single phi node:
  // if a vector value for the predicated instruction exists at this point, it
  // means the instruction has vector users only, and a phi for the vector
  // value is needed. In this case the recipe of the predicated instruction is
  // marked to also do that packing, thereby "hoisting" the insert-element
  // sequence. Otherwise, a phi node for the scalar value is needed.
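  // Schematically, the vector-value case below builds (illustrative names):
  //   %vphi = phi <VF x Ty> [ %vec.unmodified, %predicating.bb ],
  //                         [ %vec.with.insert, %predicated.bb ]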
  unsigned Part = State.Instance->Part;
  if (State.hasVectorValue(getOperand(0), Part)) {
    Value *VectorValue = State.get(getOperand(0), Part);
    InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
    PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
    VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
    VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
    if (State.hasVectorValue(this, Part))
      State.reset(this, VPhi, Part);
    else
      State.set(this, VPhi, Part);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), VPhi, Part);
  } else {
    Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
    PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
    Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
                     PredicatingBB);
    Phi->addIncoming(ScalarPredInst, PredicatedBB);
    if (State.hasScalarValue(this, *State.Instance))
      State.reset(this, Phi, *State.Instance);
    else
      State.set(this, Phi, *State.Instance);
    // NOTE: Currently we need to update the value of the operand, so the next
    // predicated iteration inserts its generated value in the correct vector.
    State.reset(getOperand(0), Phi, *State.Instance);
  }
}

void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
  VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
  State.ILV->vectorizeMemoryInstruction(
      &Ingredient, State, StoredValue ? nullptr : getVPSingleValue(), getAddr(),
      StoredValue, getMask(), Consecutive, Reverse);
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
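  // For example, a function compiled under -Oz (hasOptSize) always takes the
  // early return below, regardless of the option and hint checks in 2)-4).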
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives.
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    }
  }

  // 3) If set, obey the hints.
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  }

  // 4) If the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
                                       LVL.getLAI()))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
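  // For instance, with VF = 4 the non-uniform path below conceptually builds
  // (illustrative names only):
  //   %v0 = insertelement <4 x Ty> poison, Ty %s0, i32 0
  //   %v1 = insertelement <4 x Ty> %v0,    Ty %s1, i32 1
  //   ... and so on for lanes 2 and 3.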
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying
// the input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || EnableVPlanPredication ||
      VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
                           &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with floating point
// extensions, there will be a performance penalty from the conversion
// overhead and the change in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
                    << L->getHeader()->getParent()->getName() << "\" from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing the loop.
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis.
  // Remarks generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose, reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny
  // trip count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behaviour.
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
                               Requirements, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being
    // explicitly requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    // Optimistically generate runtime checks. Drop them if they turn out not
    // to be profitable. Limit the scope of Checks, so the cleanup happens
    // immediately after vector code generation is done.
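    // The checks generated here are the SCEV predicate checks from the union
    // predicate and the memory runtime checks from the LoopAccessInfo; both
    // guard entry to the vector loop (see Checks.Create below).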
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
                             F->getParent()->getDataLayout());
    if (!VF.Width.isScalar() || IC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());

    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not beneficial to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *beneficial* to vectorize the loop, then do
      // it.

      // Consider vectorizing the epilogue too if it's profitable.
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar
        // epilogue to be vectorized by executing the plan (potentially with a
        // different factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
        LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
                        DT);
        ++LoopsVectorized;

        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
        formLCSSARecursively(*L, *DT, LI, SE);

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);

        VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
        LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
                        DT);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
                               &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
        LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling a scalar loop when there
        // are no runtime checks about strides and memory. A scalar loop that
        // is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID.hasValue()) {
    L->setLoopID(RemainderLoopID.getValue());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve LoopInfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for the non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }
  if (!Result.MadeCFGChange)
    PA.preserveSet<CFGAnalyses>();
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}
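// As an unverified sketch: with neither option forced and the default cl::opt
// values, printPipeline above produces option text of the form
//   <no-interleave-forced-only;no-vectorize-forced-only;>
// appended to whatever pass name the MapClassName2PassName callback supplies.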