//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks. Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua. An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "LoopVectorizationPlanner.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanHCFGBuilder.h"
#include "VPlanTransforms.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/InjectTLIMappings.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

#ifndef NDEBUG
const char VerboseDebug[] = DEBUG_TYPE "-verbose";
#endif

/// @{
/// Metadata attribute names
const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
const char LLVMLoopVectorizeFollowupVectorized[] =
    "llvm.loop.vectorize.followup_vectorized";
const char LLVMLoopVectorizeFollowupEpilogue[] =
    "llvm.loop.vectorize.followup_epilogue";
/// @}

STATISTIC(LoopsVectorized, "Number of loops vectorized");
STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");

static cl::opt<bool> EnableEpilogueVectorization(
    "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Enable vectorization of epilogue loops."));

static cl::opt<unsigned> EpilogueVectorizationForceVF(
    "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
    cl::desc("When epilogue vectorization is enabled, and a value greater than "
             "1 is specified, forces the given VF for all applicable epilogue "
             "loops."));

static cl::opt<unsigned> EpilogueVectorizationMinVF(
    "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
    cl::desc("Only loops with vectorization factor equal to or larger than "
             "the specified value are considered for epilogue vectorization."));

/// Loops with a known constant trip count below this number are vectorized
/// only if no scalar iteration overheads are incurred.
static cl::opt<unsigned> TinyTripCountVectorThreshold(
    "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
    cl::desc("Loops with a constant trip count that is smaller than this "
             "value are vectorized only if no scalar iteration overheads "
             "are incurred."));

static cl::opt<unsigned> VectorizeMemoryCheckThreshold(
    "vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum allowed number of runtime memory checks"));

// Option prefer-predicate-over-epilogue indicates that an epilogue is
// undesired and that predication is preferred; it lists the related options.
// I.e., the vectorizer will try to fold the tail-loop (epilogue) into the
// vector body and predicate the instructions accordingly.
// If tail-folding fails, there are different fallback strategies depending on
// these values:
namespace PreferPredicateTy {
enum Option {
  ScalarEpilogue = 0,
  PredicateElseScalarEpilogue,
  PredicateOrDontVectorize
};
} // namespace PreferPredicateTy

static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
    "prefer-predicate-over-epilogue",
    cl::init(PreferPredicateTy::ScalarEpilogue),
    cl::Hidden,
    cl::desc("Tail-folding and predication preferences over creating a scalar "
             "epilogue loop."),
    cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
                          "scalar-epilogue",
                          "Don't tail-predicate loops, create scalar epilogue"),
               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
                          "predicate-else-scalar-epilogue",
                          "prefer tail-folding, create scalar epilogue if tail "
                          "folding fails."),
               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
                          "predicate-dont-vectorize",
                          "prefers tail-folding, don't attempt vectorization "
                          "if tail-folding fails.")));

static cl::opt<bool> MaximizeBandwidth(
    "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
    cl::desc("Maximize bandwidth when selecting vectorization factor which "
             "will be determined by the smallest type in loop."));

static cl::opt<bool> EnableInterleavedMemAccesses(
    "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on interleaved memory accesses in a loop"));

/// An interleave-group may need masking if it resides in a block that needs
/// predication, or in order to mask away gaps.
static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
    "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
    cl::desc("Enable vectorization on masked interleaved memory accesses in "
             "a loop"));

static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
    cl::desc("We don't interleave loops with an estimated constant trip count "
             "below this number"));

static cl::opt<unsigned> ForceTargetNumScalarRegs(
    "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of scalar registers."));

static cl::opt<unsigned> ForceTargetNumVectorRegs(
    "force-target-num-vector-regs", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's number of vector registers."));

static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
    "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "scalar loops."));

static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
    "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's max interleave factor for "
             "vectorized loops."));

static cl::opt<unsigned> ForceTargetInstructionCost(
    "force-target-instruction-cost", cl::init(0), cl::Hidden,
    cl::desc("A flag that overrides the target's expected cost for "
             "an instruction to a single constant value. Mostly "
             "useful for getting consistent testing."));

static cl::opt<bool> ForceTargetSupportsScalableVectors(
    "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
    cl::desc(
        "Pretend that scalable vectors are supported, even if the target does "
        "not support them. This flag should only be used for testing."));

static cl::opt<unsigned> SmallLoopCost(
    "small-loop-cost", cl::init(20), cl::Hidden,
    cl::desc(
        "The cost of a loop that is considered 'small' by the interleaver."));

static cl::opt<bool> LoopVectorizeWithBlockFrequency(
    "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
    cl::desc("Enable the use of the block frequency analysis to access PGO "
             "heuristics minimizing code growth in cold regions and being more "
             "aggressive in hot regions."));

// Runtime interleave loops for load/store throughput.
static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
    "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable runtime interleaving until load/store ports are saturated"));

/// Interleave small loops with scalar reductions.
static cl::opt<bool> InterleaveSmallLoopScalarReduction(
    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
    cl::desc("Enable interleaving for loops with small iteration counts that "
             "contain scalar reductions to expose ILP."));

/// The number of stores in a loop that are allowed to need predication.
static cl::opt<unsigned> NumberOfStoresToPredicate(
    "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
    cl::desc("Max number of stores to be predicated behind an if."));

static cl::opt<bool> EnableIndVarRegisterHeur(
    "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
    cl::desc("Count the induction variable only once when interleaving"));

static cl::opt<bool> EnableCondStoresVectorization(
    "enable-cond-stores-vec", cl::init(true), cl::Hidden,
    cl::desc("Enable if predication of stores during vectorization."));

static cl::opt<unsigned> MaxNestedScalarReductionIC(
    "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
    cl::desc("The maximum interleave count to use when interleaving a scalar "
             "reduction in a nested loop."));

static cl::opt<bool>
    PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
                           cl::Hidden,
                           cl::desc("Prefer in-loop vector reductions, "
                                    "overriding the target's preference."));

static cl::opt<bool> ForceOrderedReductions(
    "force-ordered-reductions", cl::init(false), cl::Hidden,
    cl::desc("Enable the vectorization of loops with in-order (strict) "
             "FP reductions"));

static cl::opt<bool> PreferPredicatedReductionSelect(
    "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
    cl::desc(
        "Prefer predicating a reduction operation over an after loop select."));

cl::opt<bool> EnableVPlanNativePath(
    "enable-vplan-native-path", cl::init(false), cl::Hidden,
    cl::desc("Enable VPlan-native vectorization path with "
             "support for outer loop vectorization."));

// This flag enables the stress testing of the VPlan H-CFG construction in the
// VPlan-native vectorization path. It must be used in conjunction with
// -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
// verification of the H-CFGs built.
static cl::opt<bool> VPlanBuildStressTest(
    "vplan-build-stress-test", cl::init(false), cl::Hidden,
    cl::desc(
        "Build VPlan for every supported loop nest in the function and bail "
        "out right after the build (stress test the VPlan H-CFG construction "
        "in the VPlan-native vectorization path)."));

cl::opt<bool> llvm::EnableLoopInterleaving(
    "interleave-loops", cl::init(true), cl::Hidden,
    cl::desc("Enable loop interleaving in Loop vectorization passes"));
cl::opt<bool> llvm::EnableLoopVectorization(
    "vectorize-loops", cl::init(true), cl::Hidden,
    cl::desc("Run the Loop vectorization passes"));

cl::opt<bool> PrintVPlansInDotFormat(
    "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
    cl::desc("Use dot format instead of plain text when dumping VPlans"));

/// A helper function that returns true if the given type is irregular. The
/// type is irregular if its allocated size doesn't equal the store size of an
/// element of the corresponding vector type.
static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
  // Determine if an array of N elements of type Ty is "bitcast compatible"
  // with a <N x Ty> vector.
  // This is only true if there is no padding between the array elements.
  return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
}

/// A helper function that returns the reciprocal of the block probability of
/// predicated blocks. If we return X, we are assuming the predicated block
/// will execute once for every X iterations of the loop header.
///
/// TODO: We should use actual block probability here, if available. Currently,
/// we always assume predicated blocks have a 50% chance of executing.
static unsigned getReciprocalPredBlockProb() { return 2; }

/// A helper function that returns an integer or floating-point constant with
/// value C.
static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
  return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
                           : ConstantFP::get(Ty, C);
}

/// Returns "best known" trip count for the specified loop \p L as defined by
/// the following procedure:
///   1) Returns exact trip count if it is known.
///   2) Returns expected trip count according to profile data if any.
///   3) Returns upper bound estimate if it is known.
///   4) Returns None if all of the above failed.
static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
  // Check if exact trip count is known.
  if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
    return ExpectedTC;

  // Check if there is an expected trip count available from profile data.
  if (LoopVectorizeWithBlockFrequency)
    if (auto EstimatedTC = getLoopEstimatedTripCount(L))
      return EstimatedTC;

  // Check if upper bound estimate is known.
  if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
    return ExpectedTC;

  return None;
}

// Forward declare GeneratedRTChecks.
class GeneratedRTChecks;

namespace llvm {

AnalysisKey ShouldRunExtraVectorPasses::Key;

/// InnerLoopVectorizer vectorizes loops which contain only one basic
/// block to a specified vectorization factor (VF).
/// This class performs the widening of scalars into vectors, or multiple
/// scalars.
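/// As a schematic illustration (not the exact IR emitted, which also carries
/// the checks and control flow described below): widening the scalar add
///   %sum = add i32 %a, %b
/// with VF = 4 produces a single
///   %sum.vec = add <4 x i32> %a.vec, %b.vec
/// and an unroll factor UF > 1 emits UF copies of each widened instruction.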
/// This class also implements the following features:
/// * It inserts an epilogue loop for handling loops that don't have iteration
///   counts that are known to be a multiple of the vectorization factor.
/// * It handles the code generation for reduction variables.
/// * Scalarization (implementation using scalars) of un-vectorizable
///   instructions.
/// InnerLoopVectorizer does not perform any vectorization-legality
/// checks, and relies on the caller to check for the different legality
/// aspects. The InnerLoopVectorizer relies on the
/// LoopVectorizationLegality class to provide information about the induction
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
  InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                      LoopInfo *LI, DominatorTree *DT,
                      const TargetLibraryInfo *TLI,
                      const TargetTransformInfo *TTI, AssumptionCache *AC,
                      OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
                      ElementCount MinProfitableTripCount,
                      unsigned UnrollFactor, LoopVectorizationLegality *LVL,
                      LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                      ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
      : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
        AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
        Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
        PSI(PSI), RTChecks(RTChecks) {
    // Query this against the original loop and save it here because the
    // profile of the original loop header may change as the transformation
    // happens.
    OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
        OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);

    if (MinProfitableTripCount.isZero())
      this->MinProfitableTripCount = VecWidth;
    else
      this->MinProfitableTripCount = MinProfitableTripCount;
  }

  virtual ~InnerLoopVectorizer() = default;

  /// Create a new empty loop that will contain vectorized instructions later
  /// on, while the old loop will be used as the scalar remainder. Control flow
  /// is generated around the vectorized (and scalar epilogue) loops consisting
  /// of various checks and bypasses. Return the pre-header block of the new
  /// loop and the start value for the canonical induction, if it is != 0. The
  /// latter is the case when vectorizing the epilogue loop. In the case of
  /// epilogue vectorization, this function is overridden to handle the more
  /// complex control flow around the loops.
  virtual std::pair<BasicBlock *, Value *> createVectorizedLoopSkeleton();

  /// Widen a single call instruction within the innermost loop.
  void widenCallInstruction(CallInst &CI, VPValue *Def, VPUser &ArgOperands,
                            VPTransformState &State);

  /// Fix the vectorized code, taking care of header phi's, live-outs, and
  /// more.
  void fixVectorizedLoop(VPTransformState &State, VPlan &Plan);

  // Return true if any runtime check is added.
  bool areSafetyChecksAdded() { return AddedSafetyChecks; }

  /// A type for vectorized values in the new loop. Each value from the
  /// original loop, when vectorized, is represented by UF vector values in the
  /// new unrolled loop, where UF is the unroll factor.
  using VectorParts = SmallVector<Value *, 2>;

  /// A helper function to scalarize a single Instruction in the innermost
  /// loop.
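  /// For example (illustrative only), replicating
  ///   %r = udiv i32 %x, %y
  /// across all lanes and parts produces VF * UF scalar udivs, each of which
  /// can be wrapped in its own predicated block when \p IfPredicateInstr is
  /// set.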
  /// Generates a sequence of scalar instances for each lane between \p MinLane
  /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
  /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
  /// Instr's operands.
  void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
                            const VPIteration &Instance, bool IfPredicateInstr,
                            VPTransformState &State);

  /// Construct the vector value of a scalarized value \p V one lane at a time.
  void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
                                 VPTransformState &State);

  /// Try to vectorize interleaved access group \p Group with the base address
  /// given in \p Addr, optionally masking the vector operations if \p
  /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
  /// values in the vectorized loop.
  void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
                                ArrayRef<VPValue *> VPDefs,
                                VPTransformState &State, VPValue *Addr,
                                ArrayRef<VPValue *> StoredValues,
                                VPValue *BlockInMask = nullptr);

  /// Fix the non-induction PHIs in \p Plan.
  void fixNonInductionPHIs(VPlan &Plan, VPTransformState &State);

  /// Returns true if the reordering of FP operations is not allowed, but we
  /// are able to vectorize with strict in-order reductions for the given
  /// RdxDesc.
  bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc);

  /// Create a broadcast instruction. This method generates a broadcast
  /// instruction (shuffle) for loop invariant values and for the induction
  /// value. If this is the induction variable then we extend it to N, N+1, ...
  /// this is needed because each iteration in the loop corresponds to a SIMD
  /// element.
  virtual Value *getBroadcastInstrs(Value *V);

  // Returns the resume value (bc.merge.rdx) for a reduction as
  // generated by fixReduction.
  PHINode *getReductionResumeValue(const RecurrenceDescriptor &RdxDesc);

protected:
  friend class LoopVectorizationPlanner;

  /// A small list of PHINodes.
  using PhiVector = SmallVector<PHINode *, 4>;

  /// A type for scalarized values in the new loop. Each value from the
  /// original loop, when scalarized, is represented by UF x VF scalar values
  /// in the new unrolled loop, where UF is the unroll factor and VF is the
  /// vectorization factor.
  using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;

  /// Set up the values of the IVs correctly when exiting the vector loop.
  void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
                    Value *VectorTripCount, Value *EndValue,
                    BasicBlock *MiddleBlock, BasicBlock *VectorHeader,
                    VPlan &Plan);

  /// Handle all cross-iteration phis in the header.
  void fixCrossIterationPHIs(VPTransformState &State);

  /// Create the exit value of first order recurrences in the middle block and
  /// update their users.
  void fixFirstOrderRecurrence(VPFirstOrderRecurrencePHIRecipe *PhiR,
                               VPTransformState &State);

  /// Create code for the loop exit value of the reduction.
  void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);

  /// Clear NSW/NUW flags from reduction instructions if necessary.
  void clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
                               VPTransformState &State);

  /// Iteratively sink the scalarized operands of a predicated instruction into
  /// the block that was created for it.
  void sinkScalarOperands(Instruction *PredInst);

  /// Shrinks vector element sizes to the smallest bitwidth they can be legally
  /// represented as.
  void truncateToMinimalBitwidths(VPTransformState &State);

  /// Returns (and creates if needed) the original loop trip count.
  Value *getOrCreateTripCount(BasicBlock *InsertBlock);

  /// Returns (and creates if needed) the trip count of the widened loop.
  Value *getOrCreateVectorTripCount(BasicBlock *InsertBlock);

  /// Returns a bitcasted value to the requested vector type.
  /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
  Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
                                const DataLayout &DL);

  /// Emit a bypass check to see if the vector trip count is zero, including if
  /// it overflows.
  void emitIterationCountCheck(BasicBlock *Bypass);

  /// Emit a bypass check to see if all of the SCEV assumptions we've
  /// had to make are correct. Returns the block containing the checks or
  /// nullptr if no checks have been added.
  BasicBlock *emitSCEVChecks(BasicBlock *Bypass);

  /// Emit bypass checks to check any memory assumptions we may have made.
  /// Returns the block containing the checks or nullptr if no checks have been
  /// added.
  BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass);

  /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
  /// vector loop preheader, middle block and scalar preheader.
  void createVectorLoopSkeleton(StringRef Prefix);

  /// Create new phi nodes for the induction variables to resume iteration
  /// count in the scalar epilogue, from where the vectorized loop left off.
  /// In cases where the loop skeleton is more complicated (e.g. epilogue
  /// vectorization) and the resume values can come from an additional bypass
  /// block, the \p AdditionalBypass pair provides information about the bypass
  /// block and the end value on the edge from bypass to this loop.
  void createInductionResumeValues(
      std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});

  /// Complete the loop skeleton by adding debug MDs, creating appropriate
  /// conditional branches in the middle block, preparing the builder and
  /// running the verifier. Return the preheader of the completed vector loop.
  BasicBlock *completeLoopSkeleton(MDNode *OrigLoopID);

  /// Collect poison-generating recipes that may generate a poison value that
  /// is used after vectorization, even when their operands are not poison.
  /// Those recipes meet the following conditions:
  /// * Contribute to the address computation of a recipe generating a widen
  ///   memory load/store (VPWidenMemoryInstructionRecipe or
  ///   VPInterleaveRecipe).
  /// * Such a widen memory load/store has at least one underlying Instruction
  ///   that is in a basic block that needs predication and after vectorization
  ///   the generated instruction won't be predicated.
  void collectPoisonGeneratingRecipes(VPTransformState &State);

  /// Allow subclasses to override and print debug traces before/after vplan
  /// execution, when trace information is requested.
  virtual void printDebugTracesAtStart(){};
  virtual void printDebugTracesAtEnd(){};

  /// The original loop.
  Loop *OrigLoop;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
  /// dynamic knowledge to simplify SCEV expressions and converts them to a
  /// more usable form.
  PredicatedScalarEvolution &PSE;

  /// Loop Info.
  LoopInfo *LI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// Alias Analysis.
  AAResults *AA;

  /// Target Library Info.
  const TargetLibraryInfo *TLI;

  /// Target Transform Info.
  const TargetTransformInfo *TTI;

  /// Assumption Cache.
  AssumptionCache *AC;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  /// The vectorization SIMD factor to use. Each vector will have this many
  /// vector elements.
  ElementCount VF;

  ElementCount MinProfitableTripCount;

  /// The vectorization unroll factor to use. Each scalar is vectorized to this
  /// many different vector instructions.
  unsigned UF;

  /// The builder that we use
  IRBuilder<> Builder;

  // --- Vectorization state ---

  /// The vector-loop preheader.
  BasicBlock *LoopVectorPreHeader;

  /// The scalar-loop preheader.
  BasicBlock *LoopScalarPreHeader;

  /// Middle Block between the vector and the scalar.
  BasicBlock *LoopMiddleBlock;

  /// The unique ExitBlock of the scalar loop if one exists. Note that
  /// there can be multiple exiting edges reaching this block.
  BasicBlock *LoopExitBlock;

  /// The scalar loop body.
  BasicBlock *LoopScalarBody;

  /// A list of all bypass blocks. The first block is the entry of the loop.
  SmallVector<BasicBlock *, 4> LoopBypassBlocks;

  /// Store instructions that were predicated.
  SmallVector<Instruction *, 4> PredicatedInstructions;

  /// Trip count of the original loop.
  Value *TripCount = nullptr;

  /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
  Value *VectorTripCount = nullptr;

  /// The legality analysis.
  LoopVectorizationLegality *Legal;

  /// The profitability analysis.
  LoopVectorizationCostModel *Cost;

  // Record whether runtime checks are added.
  bool AddedSafetyChecks = false;

  // Holds the end values for each induction variable. We save the end values
  // so we can later fix-up the external users of the induction variables.
  DenseMap<PHINode *, Value *> IVEndValues;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Whether this loop should be optimized for size based on profile guided
  // size optimizations.
  bool OptForSizeBasedOnProfile;

  /// Structure to hold information about generated runtime checks, responsible
  /// for cleaning the checks, if vectorization turns out unprofitable.
  GeneratedRTChecks &RTChecks;

  // Holds the resume values for reductions in the loops, used to set the
  // correct start value of reduction PHIs when vectorizing the epilogue.
  SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
      ReductionResumeValues;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
  InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
                    LoopInfo *LI, DominatorTree *DT,
                    const TargetLibraryInfo *TLI,
                    const TargetTransformInfo *TTI, AssumptionCache *AC,
                    OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
                    LoopVectorizationLegality *LVL,
                    LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
                    ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            ElementCount::getFixed(1),
                            ElementCount::getFixed(1), UnrollFactor, LVL, CM,
                            BFI, PSI, Check) {}

private:
  Value *getBroadcastInstrs(Value *V) override;
};

/// Encapsulate information regarding vectorization of a loop and its epilogue.
/// This information is meant to be updated and used across two stages of
/// epilogue vectorization.
struct EpilogueLoopVectorizationInfo {
  ElementCount MainLoopVF = ElementCount::getFixed(0);
  unsigned MainLoopUF = 0;
  ElementCount EpilogueVF = ElementCount::getFixed(0);
  unsigned EpilogueUF = 0;
  BasicBlock *MainLoopIterationCountCheck = nullptr;
  BasicBlock *EpilogueIterationCountCheck = nullptr;
  BasicBlock *SCEVSafetyCheck = nullptr;
  BasicBlock *MemSafetyCheck = nullptr;
  Value *TripCount = nullptr;
  Value *VectorTripCount = nullptr;

  EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
                                ElementCount EVF, unsigned EUF)
      : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
    assert(EUF == 1 &&
           "A high UF for the epilogue loop is likely not beneficial.");
  }
};

/// An extension of the inner loop vectorizer that creates a skeleton for a
/// vectorized loop that has its epilogue (residual) also vectorized.
/// The idea is to run the vplan on a given loop twice, firstly to setup the
/// skeleton and vectorize the main loop, and secondly to complete the skeleton
/// from the first step and vectorize the epilogue. This is achieved by
/// deriving two concrete strategy classes from this base class and invoking
/// them in succession from the loop vectorizer planner.
class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
public:
  InnerLoopAndEpilogueVectorizer(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                            EPI.MainLoopVF, EPI.MainLoopVF, EPI.MainLoopUF, LVL,
                            CM, BFI, PSI, Checks),
        EPI(EPI) {}

  // Override this function to handle the more complex control flow around the
  // three loops.
  std::pair<BasicBlock *, Value *>
  createVectorizedLoopSkeleton() final override {
    return createEpilogueVectorizedLoopSkeleton();
  }

  /// The interface for creating a vectorized skeleton using one of two
  /// different strategies, each corresponding to one execution of the vplan
  /// as described above.
  virtual std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() = 0;

  /// Holds and updates state information required to vectorize the main loop
  /// and its epilogue in two separate passes. This setup helps us avoid
  /// regenerating and recomputing runtime safety checks. It also helps us to
  /// shorten the iteration-count-check path length for the cases where the
  /// iteration count of the loop is so small that the main vector loop is
  /// completely skipped.
  EpilogueLoopVectorizationInfo &EPI;
};

/// A specialized derived class of inner loop vectorizer that performs
/// vectorization of *main* loops in the process of vectorizing loops and their
/// epilogues.
class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerMainLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Check)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Check) {}
  /// Implements the interface for creating a vectorized skeleton using the
  /// *main loop* strategy (i.e., the first pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check once for the main loop (when \p
  /// ForEpilogue is false) and once for the epilogue loop (when \p
  /// ForEpilogue is true).
  BasicBlock *emitIterationCountCheck(BasicBlock *Bypass, bool ForEpilogue);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};

// A specialized derived class of inner loop vectorizer that performs
// vectorization of *epilogue* loops in the process of vectorizing loops and
// their epilogues.
class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
public:
  EpilogueVectorizerEpilogueLoop(
      Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
      DominatorTree *DT, const TargetLibraryInfo *TLI,
      const TargetTransformInfo *TTI, AssumptionCache *AC,
      OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
      LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
      BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
      GeneratedRTChecks &Checks)
      : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
                                       EPI, LVL, CM, BFI, PSI, Checks) {
    TripCount = EPI.TripCount;
  }
  /// Implements the interface for creating a vectorized skeleton using the
  /// *epilogue loop* strategy (i.e., the second pass of vplan execution).
  std::pair<BasicBlock *, Value *>
  createEpilogueVectorizedLoopSkeleton() final override;

protected:
  /// Emits an iteration count bypass check after the main vector loop has
  /// finished to see if there are any iterations left to execute by either
  /// the vector epilogue or the scalar epilogue.
  BasicBlock *emitMinimumVectorEpilogueIterCountCheck(BasicBlock *Bypass,
                                                      BasicBlock *Insert);
  void printDebugTracesAtStart() override;
  void printDebugTracesAtEnd() override;
};
} // end namespace llvm

/// Look for a meaningful debug location on the instruction or its operands.
static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
  if (!I)
    return I;

  DebugLoc Empty;
  if (I->getDebugLoc() != Empty)
    return I;

  for (Use &Op : I->operands()) {
    if (Instruction *OpInst = dyn_cast<Instruction>(Op))
      if (OpInst->getDebugLoc() != Empty)
        return OpInst;
  }

  return I;
}

/// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
/// is passed, the message relates to that particular instruction.
#ifndef NDEBUG
static void debugVectorizationMessage(const StringRef Prefix,
                                      const StringRef DebugMsg,
                                      Instruction *I) {
  dbgs() << "LV: " << Prefix << DebugMsg;
  if (I != nullptr)
    dbgs() << " " << *I;
  else
    dbgs() << '.';
  dbgs() << '\n';
}
#endif

/// Create an analysis remark that explains why vectorization failed
///
/// \p PassName is the name of the pass (e.g. can be AlwaysPrint). \p
/// RemarkName is the identifier for the remark. If \p I is passed it is an
/// instruction that prevents vectorization. Otherwise \p TheLoop is used for
/// the location of the remark. \return the remark object that can be
/// streamed to.
static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
                                                   StringRef RemarkName,
                                                   Loop *TheLoop,
                                                   Instruction *I) {
  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
}

namespace llvm {

/// Return a value for Step multiplied by VF.
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
                       int64_t Step) {
  assert(Ty->isIntegerTy() && "Expected an integer step");
  Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
}

/// Return the runtime value for VF.
Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF) {
  Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
  return VF.isScalable() ? B.CreateVScale(EC) : EC;
}
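// For example (illustrative values only): with a fixed VF of 4, getRuntimeVF
// above returns the constant 4 and createStepForVF with Step = 2 returns the
// constant 8; with a scalable VF of <vscale x 4>, the same calls return
// 4 * vscale and 8 * vscale, materialized through the llvm.vscale intrinsic.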
static Value *getRuntimeVFAsFloat(IRBuilderBase &B, Type *FTy,
                                  ElementCount VF) {
  assert(FTy->isFloatingPointTy() && "Expected floating point type!");
  Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
  Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
  return B.CreateUIToFP(RuntimeVF, FTy);
}

void reportVectorizationFailure(const StringRef DebugMsg,
                                const StringRef OREMsg, const StringRef ORETag,
                                OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                                Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << "loop not vectorized: " << OREMsg);
}

void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I) {
  LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
  LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
  ORE->emit(
      createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
      << Msg);
}

} // end namespace llvm

#ifndef NDEBUG
/// \return string containing a file name and a line # for the given loop.
static std::string getDebugLocString(const Loop *L) {
  std::string Result;
  if (L) {
    raw_string_ostream OS(Result);
    if (const DebugLoc LoopDbgLoc = L->getStartLoc())
      LoopDbgLoc.print(OS);
    else
      // Just print the module name.
      OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
    OS.flush();
  }
  return Result;
}
#endif

void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
    VPTransformState &State) {

  // Collect recipes in the backward slice of `Root` that may generate a poison
  // value that is used after vectorization.
  SmallPtrSet<VPRecipeBase *, 16> Visited;
  auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
    SmallVector<VPRecipeBase *, 16> Worklist;
    Worklist.push_back(Root);

    // Traverse the backward slice of Root through its use-def chain.
    while (!Worklist.empty()) {
      VPRecipeBase *CurRec = Worklist.back();
      Worklist.pop_back();

      if (!Visited.insert(CurRec).second)
        continue;

      // Prune search if we find another recipe generating a widen memory
      // instruction. Widen memory instructions involved in address computation
      // will lead to gather/scatter instructions, which don't need to be
      // handled.
      if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
          isa<VPInterleaveRecipe>(CurRec) ||
          isa<VPScalarIVStepsRecipe>(CurRec) ||
          isa<VPCanonicalIVPHIRecipe>(CurRec) ||
          isa<VPActiveLaneMaskPHIRecipe>(CurRec))
        continue;

      // This recipe contributes to the address computation of a widen
      // load/store. Collect recipe if its underlying instruction has
      // poison-generating flags.
      Instruction *Instr = CurRec->getUnderlyingInstr();
      if (Instr && Instr->hasPoisonGeneratingFlags())
        State.MayGeneratePoisonRecipes.insert(CurRec);

      // Add new definitions to the worklist.
      for (VPValue *operand : CurRec->operands())
        if (VPDef *OpDef = operand->getDef())
          Worklist.push_back(cast<VPRecipeBase>(OpDef));
    }
  });

  // Traverse all the recipes in the VPlan and collect the poison-generating
  // recipes in the backward slice starting at the address of a
  // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
  auto Iter = depth_first(
      VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
    for (VPRecipeBase &Recipe : *VPBB) {
      if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
        Instruction &UnderlyingInstr = WidenRec->getIngredient();
        VPDef *AddrDef = WidenRec->getAddr()->getDef();
        if (AddrDef && WidenRec->isConsecutive() &&
            Legal->blockNeedsPredication(UnderlyingInstr.getParent()))
          collectPoisonGeneratingInstrsInBackwardSlice(
              cast<VPRecipeBase>(AddrDef));
      } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
        VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
        if (AddrDef) {
          // Check if any member of the interleave group needs predication.
          const InterleaveGroup<Instruction> *InterGroup =
              InterleaveRec->getInterleaveGroup();
          bool NeedPredication = false;
          for (int I = 0, NumMembers = InterGroup->getNumMembers();
               I < NumMembers; ++I) {
            Instruction *Member = InterGroup->getMember(I);
            if (Member)
              NeedPredication |=
                  Legal->blockNeedsPredication(Member->getParent());
          }

          if (NeedPredication)
            collectPoisonGeneratingInstrsInBackwardSlice(
                cast<VPRecipeBase>(AddrDef));
        }
      }
    }
  }
}

PHINode *InnerLoopVectorizer::getReductionResumeValue(
    const RecurrenceDescriptor &RdxDesc) {
  auto It = ReductionResumeValues.find(&RdxDesc);
  assert(It != ReductionResumeValues.end() &&
         "Expected to find a resume value for the reduction.");
  return It->second;
}

namespace llvm {

// Loop vectorization cost-model hints how the scalar epilogue loop should be
// lowered.
enum ScalarEpilogueLowering {

  // The default: allowing scalar epilogues.
  CM_ScalarEpilogueAllowed,

  // Vectorization with OptForSize: don't allow epilogues.
  CM_ScalarEpilogueNotAllowedOptSize,

  // A special case of vectorization with OptForSize: loops with a very small
  // trip count are considered for vectorization under OptForSize, thereby
  // making sure the cost of their loop body is dominant, free of runtime
  // guards and scalar iteration overheads.
  CM_ScalarEpilogueNotAllowedLowTripLoop,

  // Loop hint predicate indicating an epilogue is undesired.
  CM_ScalarEpilogueNotNeededUsePredicate,

  // Directive indicating we must either tail fold or not vectorize
  CM_ScalarEpilogueNotAllowedUsePredicate
};

/// ElementCountComparator creates a total ordering for ElementCount
/// for the purposes of using it in a set structure.
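/// For example, the induced order places all fixed VFs before all scalable
/// ones: 4 < 8 < vscale x 4 < vscale x 8.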
struct ElementCountComparator {
  bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
  }
};
using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;

/// LoopVectorizationCostModel - estimates the expected speedups due to
/// vectorization.
/// In many cases vectorization is not profitable. This can happen for a
/// number of reasons. In this class we mainly attempt to predict the expected
/// speedup/slowdowns due to the supported instruction set. We use the
/// TargetTransformInfo to query the different backends for the cost of
/// different operations.
class LoopVectorizationCostModel {
public:
  LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
                             PredicatedScalarEvolution &PSE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
                             const TargetLibraryInfo *TLI, DemandedBits *DB,
                             AssumptionCache *AC,
                             OptimizationRemarkEmitter *ORE, const Function *F,
                             const LoopVectorizeHints *Hints,
                             InterleavedAccessInfo &IAI)
      : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
        Hints(Hints), InterleaveInfo(IAI) {}

  /// \return An upper bound for the vectorization factors (both fixed and
  /// scalable). If the factors are 0, vectorization and interleaving should be
  /// avoided up front.
  FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);

  /// \return True if runtime checks are required for vectorization, and false
  /// otherwise.
  bool runtimeChecksRequired();

  /// \return The most profitable vectorization factor and the cost of that VF.
  /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
  /// then this vectorization factor will be selected if vectorization is
  /// possible.
  VectorizationFactor
  selectVectorizationFactor(const ElementCountSet &CandidateVFs);

  VectorizationFactor
  selectEpilogueVectorizationFactor(const ElementCount MaxVF,
                                    const LoopVectorizationPlanner &LVP);

  /// Setup cost-based decisions for user vectorization factor.
  /// \return true if the UserVF is a feasible VF to be chosen.
  bool selectUserVectorizationFactor(ElementCount UserVF) {
    collectUniformsAndScalars(UserVF);
    collectInstsToScalarize(UserVF);
    return expectedCost(UserVF).first.isValid();
  }

  /// \return The size (in bits) of the smallest and widest types in the code
  /// that needs to be vectorized. We ignore values that remain scalar such as
  /// 64 bit loop indices.
  std::pair<unsigned, unsigned> getSmallestAndWidestTypes();

  /// \return The desired interleave count.
  /// If interleave count has been specified by metadata it will be returned.
  /// Otherwise, the interleave count is computed and returned. VF and LoopCost
  /// are the selected vectorization factor and the cost of the selected VF.
  unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);

  /// Memory access instructions may be vectorized in more than one way. The
  /// form of the instruction after vectorization depends on its cost.
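  /// For example, a single load may become a wide consecutive load, a reversed
  /// load, a gather, a set of scalarized loads, or part of an interleave-group
  /// access (see the InstWidening enum below).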
1192 /// This function takes cost-based decisions for Load/Store instructions 1193 /// and collects them in a map. This decisions map is used for building 1194 /// the lists of loop-uniform and loop-scalar instructions. 1195 /// The calculated cost is saved with widening decision in order to 1196 /// avoid redundant calculations. 1197 void setCostBasedWideningDecision(ElementCount VF); 1198 1199 /// A struct that represents some properties of the register usage 1200 /// of a loop. 1201 struct RegisterUsage { 1202 /// Holds the number of loop invariant values that are used in the loop. 1203 /// The key is ClassID of target-provided register class. 1204 SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs; 1205 /// Holds the maximum number of concurrent live intervals in the loop. 1206 /// The key is ClassID of target-provided register class. 1207 SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers; 1208 }; 1209 1210 /// \return Returns information about the register usages of the loop for the 1211 /// given vectorization factors. 1212 SmallVector<RegisterUsage, 8> 1213 calculateRegisterUsage(ArrayRef<ElementCount> VFs); 1214 1215 /// Collect values we want to ignore in the cost model. 1216 void collectValuesToIgnore(); 1217 1218 /// Collect all element types in the loop for which widening is needed. 1219 void collectElementTypesForWidening(); 1220 1221 /// Split reductions into those that happen in the loop, and those that happen 1222 /// outside. In loop reductions are collected into InLoopReductionChains. 1223 void collectInLoopReductions(); 1224 1225 /// Returns true if we should use strict in-order reductions for the given 1226 /// RdxDesc. This is true if the -enable-strict-reductions flag is passed, 1227 /// the IsOrdered flag of RdxDesc is set and we do not allow reordering 1228 /// of FP operations. 1229 bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) const { 1230 return !Hints->allowReordering() && RdxDesc.isOrdered(); 1231 } 1232 1233 /// \returns The smallest bitwidth each instruction can be represented with. 1234 /// The vector equivalents of these instructions should be truncated to this 1235 /// type. 1236 const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const { 1237 return MinBWs; 1238 } 1239 1240 /// \returns True if it is more profitable to scalarize instruction \p I for 1241 /// vectorization factor \p VF. 1242 bool isProfitableToScalarize(Instruction *I, ElementCount VF) const { 1243 assert(VF.isVector() && 1244 "Profitable to scalarize relevant only for VF > 1."); 1245 1246 // Cost model is not run in the VPlan-native path - return conservative 1247 // result until this changes. 1248 if (EnableVPlanNativePath) 1249 return false; 1250 1251 auto Scalars = InstsToScalarize.find(VF); 1252 assert(Scalars != InstsToScalarize.end() && 1253 "VF not yet analyzed for scalarization profitability"); 1254 return Scalars->second.find(I) != Scalars->second.end(); 1255 } 1256 1257 /// Returns true if \p I is known to be uniform after vectorization. 1258 bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const { 1259 if (VF.isScalar()) 1260 return true; 1261 1262 // Cost model is not run in the VPlan-native path - return conservative 1263 // result until this changes. 
1264 if (EnableVPlanNativePath) 1265 return false; 1266 1267 auto UniformsPerVF = Uniforms.find(VF); 1268 assert(UniformsPerVF != Uniforms.end() && 1269 "VF not yet analyzed for uniformity"); 1270 return UniformsPerVF->second.count(I); 1271 } 1272 1273 /// Returns true if \p I is known to be scalar after vectorization. 1274 bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const { 1275 if (VF.isScalar()) 1276 return true; 1277 1278 // Cost model is not run in the VPlan-native path - return conservative 1279 // result until this changes. 1280 if (EnableVPlanNativePath) 1281 return false; 1282 1283 auto ScalarsPerVF = Scalars.find(VF); 1284 assert(ScalarsPerVF != Scalars.end() && 1285 "Scalar values are not calculated for VF"); 1286 return ScalarsPerVF->second.count(I); 1287 } 1288 1289 /// \returns True if instruction \p I can be truncated to a smaller bitwidth 1290 /// for vectorization factor \p VF. 1291 bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const { 1292 return VF.isVector() && MinBWs.find(I) != MinBWs.end() && 1293 !isProfitableToScalarize(I, VF) && 1294 !isScalarAfterVectorization(I, VF); 1295 } 1296 1297 /// Decision that was taken during cost calculation for memory instruction. 1298 enum InstWidening { 1299 CM_Unknown, 1300 CM_Widen, // For consecutive accesses with stride +1. 1301 CM_Widen_Reverse, // For consecutive accesses with stride -1. 1302 CM_Interleave, 1303 CM_GatherScatter, 1304 CM_Scalarize 1305 }; 1306 1307 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1308 /// instruction \p I and vector width \p VF. 1309 void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W, 1310 InstructionCost Cost) { 1311 assert(VF.isVector() && "Expected VF >=2"); 1312 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1313 } 1314 1315 /// Save vectorization decision \p W and \p Cost taken by the cost model for 1316 /// interleaving group \p Grp and vector width \p VF. 1317 void setWideningDecision(const InterleaveGroup<Instruction> *Grp, 1318 ElementCount VF, InstWidening W, 1319 InstructionCost Cost) { 1320 assert(VF.isVector() && "Expected VF >=2"); 1321 /// Broadcast this decicion to all instructions inside the group. 1322 /// But the cost will be assigned to one instruction only. 1323 for (unsigned i = 0; i < Grp->getFactor(); ++i) { 1324 if (auto *I = Grp->getMember(i)) { 1325 if (Grp->getInsertPos() == I) 1326 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost); 1327 else 1328 WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0); 1329 } 1330 } 1331 } 1332 1333 /// Return the cost model decision for the given instruction \p I and vector 1334 /// width \p VF. Return CM_Unknown if this instruction did not pass 1335 /// through the cost modeling. 1336 InstWidening getWideningDecision(Instruction *I, ElementCount VF) const { 1337 assert(VF.isVector() && "Expected VF to be a vector VF"); 1338 // Cost model is not run in the VPlan-native path - return conservative 1339 // result until this changes. 1340 if (EnableVPlanNativePath) 1341 return CM_GatherScatter; 1342 1343 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1344 auto Itr = WideningDecisions.find(InstOnVF); 1345 if (Itr == WideningDecisions.end()) 1346 return CM_Unknown; 1347 return Itr->second.first; 1348 } 1349 1350 /// Return the vectorization cost for the given instruction \p I and vector 1351 /// width \p VF. 
1352 InstructionCost getWideningCost(Instruction *I, ElementCount VF) { 1353 assert(VF.isVector() && "Expected VF >=2"); 1354 std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF); 1355 assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() && 1356 "The cost is not calculated"); 1357 return WideningDecisions[InstOnVF].second; 1358 } 1359 1360 /// Return True if instruction \p I is an optimizable truncate whose operand 1361 /// is an induction variable. Such a truncate will be removed by adding a new 1362 /// induction variable with the destination type. 1363 bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) { 1364 // If the instruction is not a truncate, return false. 1365 auto *Trunc = dyn_cast<TruncInst>(I); 1366 if (!Trunc) 1367 return false; 1368 1369 // Get the source and destination types of the truncate. 1370 Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF); 1371 Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF); 1372 1373 // If the truncate is free for the given types, return false. Replacing a 1374 // free truncate with an induction variable would add an induction variable 1375 // update instruction to each iteration of the loop. We exclude from this 1376 // check the primary induction variable since it will need an update 1377 // instruction regardless. 1378 Value *Op = Trunc->getOperand(0); 1379 if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy)) 1380 return false; 1381 1382 // If the truncated value is not an induction variable, return false. 1383 return Legal->isInductionPhi(Op); 1384 } 1385 1386 /// Collects the instructions to scalarize for each predicated instruction in 1387 /// the loop. 1388 void collectInstsToScalarize(ElementCount VF); 1389 1390 /// Collect Uniform and Scalar values for the given \p VF. 1391 /// The sets depend on CM decision for Load/Store instructions 1392 /// that may be vectorized as interleave, gather-scatter or scalarized. 1393 void collectUniformsAndScalars(ElementCount VF) { 1394 // Do the analysis once. 1395 if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end()) 1396 return; 1397 setCostBasedWideningDecision(VF); 1398 collectLoopUniforms(VF); 1399 collectLoopScalars(VF); 1400 } 1401 1402 /// Returns true if the target machine supports masked store operation 1403 /// for the given \p DataType and kind of access to \p Ptr. 1404 bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const { 1405 return Legal->isConsecutivePtr(DataType, Ptr) && 1406 TTI.isLegalMaskedStore(DataType, Alignment); 1407 } 1408 1409 /// Returns true if the target machine supports masked load operation 1410 /// for the given \p DataType and kind of access to \p Ptr. 1411 bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const { 1412 return Legal->isConsecutivePtr(DataType, Ptr) && 1413 TTI.isLegalMaskedLoad(DataType, Alignment); 1414 } 1415 1416 /// Returns true if the target machine can represent \p V as a masked gather 1417 /// or scatter operation. 
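/// For instance (an illustrative case, not taken from a particular input): an
/// indexed load such as A[B[i]], whose addresses are neither consecutive nor
/// uniform, can only be widened as a masked gather, so this helper asks the
/// target whether a masked gather (for loads) or masked scatter (for stores)
/// of the corresponding vector type and alignment is legal.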
1418 bool isLegalGatherOrScatter(Value *V, 1419 ElementCount VF = ElementCount::getFixed(1)) { 1420 bool LI = isa<LoadInst>(V); 1421 bool SI = isa<StoreInst>(V); 1422 if (!LI && !SI) 1423 return false; 1424 auto *Ty = getLoadStoreType(V); 1425 Align Align = getLoadStoreAlignment(V); 1426 if (VF.isVector()) 1427 Ty = VectorType::get(Ty, VF); 1428 return (LI && TTI.isLegalMaskedGather(Ty, Align)) || 1429 (SI && TTI.isLegalMaskedScatter(Ty, Align)); 1430 } 1431 1432 /// Returns true if the target machine supports all of the reduction 1433 /// variables found for the given VF. 1434 bool canVectorizeReductions(ElementCount VF) const { 1435 return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool { 1436 const RecurrenceDescriptor &RdxDesc = Reduction.second; 1437 return TTI.isLegalToVectorizeReduction(RdxDesc, VF); 1438 })); 1439 } 1440 1441 /// Returns true if \p I is an instruction that will be scalarized with 1442 /// predication when vectorizing \p I with vectorization factor \p VF. Such 1443 /// instructions include conditional stores and instructions that may divide 1444 /// by zero. 1445 bool isScalarWithPredication(Instruction *I, ElementCount VF) const; 1446 1447 // Returns true if \p I is an instruction that will be predicated either 1448 // through scalar predication or masked load/store or masked gather/scatter. 1449 // \p VF is the vectorization factor that will be used to vectorize \p I. 1450 // Superset of instructions that return true for isScalarWithPredication. 1451 bool isPredicatedInst(Instruction *I, ElementCount VF) { 1452 // When we know the load's address is loop invariant and the instruction 1453 // in the original scalar loop was unconditionally executed then we 1454 // don't need to mark it as a predicated instruction. Tail folding may 1455 // introduce additional predication, but we're guaranteed to always have 1456 // at least one active lane. We call Legal->blockNeedsPredication here 1457 // because it doesn't query tail-folding. 1458 if (Legal->isUniformMemOp(*I) && isa<LoadInst>(I) && 1459 !Legal->blockNeedsPredication(I->getParent())) 1460 return false; 1461 if (!blockNeedsPredicationForAnyReason(I->getParent())) 1462 return false; 1463 // Loads and stores that need some form of masked operation are predicated 1464 // instructions. 1465 if (isa<LoadInst>(I) || isa<StoreInst>(I)) 1466 return Legal->isMaskRequired(I); 1467 return isScalarWithPredication(I, VF); 1468 } 1469 1470 /// Returns true if \p I is a memory instruction with consecutive memory 1471 /// access that can be widened. 1472 bool 1473 memoryInstructionCanBeWidened(Instruction *I, 1474 ElementCount VF = ElementCount::getFixed(1)); 1475 1476 /// Returns true if \p I is a memory instruction in an interleaved-group 1477 /// of memory accesses that can be vectorized with wide vector loads/stores 1478 /// and shuffles. 1479 bool 1480 interleavedAccessCanBeWidened(Instruction *I, 1481 ElementCount VF = ElementCount::getFixed(1)); 1482 1483 /// Check if \p Instr belongs to any interleaved access group. 1484 bool isAccessInterleaved(Instruction *Instr) { 1485 return InterleaveInfo.isInterleaved(Instr); 1486 } 1487 1488 /// Get the interleaved access group that \p Instr belongs to. 1489 const InterleaveGroup<Instruction> * 1490 getInterleavedAccessGroup(Instruction *Instr) { 1491 return InterleaveInfo.getInterleaveGroup(Instr); 1492 } 1493 1494 /// Returns true if we're required to use a scalar epilogue for at least 1495 /// the final iteration of the original loop. 
1496 bool requiresScalarEpilogue(ElementCount VF) const {
1497 if (!isScalarEpilogueAllowed())
1498 return false;
1499 // If we might exit from anywhere but the latch, we must run the exiting
1500 // iteration in scalar form.
1501 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1502 return true;
1503 return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1504 }
1505
1506 /// Returns true if a scalar epilogue is not allowed due to optsize or a
1507 /// loop hint annotation.
1508 bool isScalarEpilogueAllowed() const {
1509 return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1510 }
1511
1512 /// Returns true if all loop blocks should be masked to fold the tail of the loop.
1513 bool foldTailByMasking() const { return FoldTailByMasking; }
1514
1515 /// Returns true if we're tail-folding and want to use the active lane mask
1516 /// for vector loop control flow.
1517 bool useActiveLaneMaskForControlFlow() const {
1518 return FoldTailByMasking &&
1519 TTI.emitGetActiveLaneMask() == PredicationStyle::DataAndControlFlow;
1520 }
1521
1522 /// Returns true if the instructions in this block require predication
1523 /// for any reason, e.g. because tail folding now requires a predicate
1524 /// or because the block in the original loop was predicated.
1525 bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1526 return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1527 }
1528
1529 /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1530 /// nodes to the chain of instructions representing the reductions. Uses a
1531 /// MapVector to ensure deterministic iteration order.
1532 using ReductionChainMap =
1533 SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1534
1535 /// Return the chain of instructions representing an inloop reduction.
1536 const ReductionChainMap &getInLoopReductionChains() const {
1537 return InLoopReductionChains;
1538 }
1539
1540 /// Returns true if the Phi is part of an inloop reduction.
1541 bool isInLoopReduction(PHINode *Phi) const {
1542 return InLoopReductionChains.count(Phi);
1543 }
1544
1545 /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1546 /// with factor VF. Return the cost of the instruction, including
1547 /// scalarization overhead if it's needed.
1548 InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1549
1550 /// Estimate cost of a call instruction CI if it were vectorized with factor
1551 /// VF. Return the cost of the instruction, including scalarization overhead
1552 /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1553 /// scalarized,
1554 /// i.e. either a vector version isn't available or it is too expensive.
1555 InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1556 bool &NeedToScalarize) const;
1557
1558 /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1559 /// that of B.
1560 bool isMoreProfitable(const VectorizationFactor &A,
1561 const VectorizationFactor &B) const;
1562
1563 /// Invalidates decisions already taken by the cost model.
1564 void invalidateCostModelingDecisions() {
1565 WideningDecisions.clear();
1566 Uniforms.clear();
1567 Scalars.clear();
1568 }
1569
1570 /// Convenience function that returns the value of vscale_range iff
1571 /// vscale_range.min == vscale_range.max, or otherwise returns the value
1572 /// returned by the corresponding TTI method.
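/// For instance (illustrative): a function annotated with vscale_range(2,2)
/// yields 2 here, whereas vscale_range(1,16) falls back to the target's
/// tuning value.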
1573 Optional<unsigned> getVScaleForTuning() const;
1574
1575 private:
1576 unsigned NumPredStores = 0;
1577
1578 /// \return An upper bound for the vectorization factors for both
1579 /// fixed and scalable vectorization, where the minimum-known number of
1580 /// elements is a power-of-2 larger than zero. If scalable vectorization is
1581 /// disabled or unsupported, then the scalable part will be equal to
1582 /// ElementCount::getScalable(0).
1583 FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1584 ElementCount UserVF,
1585 bool FoldTailByMasking);
1586
1587 /// \return the maximized element count based on the target's vector
1588 /// registers and the loop trip-count, but limited to a maximum safe VF.
1589 /// This is a helper function of computeFeasibleMaxVF.
1590 ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1591 unsigned SmallestType,
1592 unsigned WidestType,
1593 ElementCount MaxSafeVF,
1594 bool FoldTailByMasking);
1595
1596 /// \return the maximum legal scalable VF, based on the safe max number
1597 /// of elements.
1598 ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1599
1600 /// The vectorization cost is a combination of the cost itself and a boolean
1601 /// indicating whether any of the contributing operations will actually
1602 /// operate on vector values after type legalization in the backend. If this
1603 /// latter value is false, then all operations will be scalarized (i.e. no
1604 /// vectorization has actually taken place).
1605 using VectorizationCostTy = std::pair<InstructionCost, bool>;
1606
1607 /// Returns the expected execution cost. The unit of the cost does
1608 /// not matter because we use the 'cost' units to compare different
1609 /// vector widths. The cost that is returned is *not* normalized by
1610 /// the factor width. If \p Invalid is not nullptr, this function
1611 /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1612 /// each instruction that has an Invalid cost for the given VF.
1613 using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1614 VectorizationCostTy
1615 expectedCost(ElementCount VF,
1616 SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1617
1618 /// Returns the execution time cost of an instruction for a given vector
1619 /// width. Vector width of one means scalar.
1620 VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1621
1622 /// The cost-computation logic from getInstructionCost which provides
1623 /// the vector type as an output parameter.
1624 InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1625 Type *&VectorTy);
1626
1627 /// Return the cost of instructions in an inloop reduction pattern, if I is
1628 /// part of that pattern.
1629 Optional<InstructionCost>
1630 getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1631 TTI::TargetCostKind CostKind);
1632
1633 /// Calculate vectorization cost of memory instruction \p I.
1634 InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1635
1636 /// The cost computation for scalarized memory instruction.
1637 InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1638
1639 /// The cost computation for interleaving group of memory instructions.
1640 InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1641
1642 /// The cost computation for Gather/Scatter instruction.
1643 InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1644
1645 /// The cost computation for widening instruction \p I with consecutive
1646 /// memory access.
1647 InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1648
1649 /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1650 /// Load: scalar load + broadcast.
1651 /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1652 /// element)
1653 InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1654
1655 /// Estimate the overhead of scalarizing an instruction. This is a
1656 /// convenience wrapper for the type-based getScalarizationOverhead API.
1657 InstructionCost getScalarizationOverhead(Instruction *I,
1658 ElementCount VF) const;
1659
1660 /// Returns true if an artificially high cost for emulated masked memrefs
1661 /// should be used.
1662 bool useEmulatedMaskMemRefHack(Instruction *I, ElementCount VF);
1663
1664 /// Map of scalar integer values to the smallest bitwidth they can be legally
1665 /// represented as. The vector equivalents of these values should be truncated
1666 /// to this type.
1667 MapVector<Instruction *, uint64_t> MinBWs;
1668
1669 /// A type representing the costs for instructions if they were to be
1670 /// scalarized rather than vectorized. The entries are Instruction-Cost
1671 /// pairs.
1672 using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1673
1674 /// Per-VF sets containing all BasicBlocks that are known to be present after
1675 /// vectorization as predicated blocks.
1676 DenseMap<ElementCount, SmallPtrSet<BasicBlock *, 4>>
1677 PredicatedBBsAfterVectorization;
1678
1679 /// Records whether it is allowed to have the original scalar loop execute at
1680 /// least once. This may be needed as a fallback loop in case runtime
1681 /// aliasing/dependence checks fail, or to handle the tail/remainder
1682 /// iterations when the trip count is unknown or doesn't divide by the VF,
1683 /// or as a peel-loop to handle gaps in interleave-groups.
1684 /// Under optsize and when the trip count is very small we don't allow any
1685 /// iterations to execute in the scalar loop.
1686 ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1687
1688 /// All blocks of the loop are to be masked to fold the tail of scalar iterations.
1689 bool FoldTailByMasking = false;
1690
1691 /// A map holding scalar costs for different vectorization factors. The
1692 /// presence of a cost for an instruction in the mapping indicates that the
1693 /// instruction will be scalarized when vectorizing with the associated
1694 /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1695 DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1696
1697 /// Holds the instructions known to be uniform after vectorization.
1698 /// The data is collected per VF.
1699 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1700
1701 /// Holds the instructions known to be scalar after vectorization.
1702 /// The data is collected per VF.
1703 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1704
1705 /// Holds the instructions (address computations) that are forced to be
1706 /// scalarized.
1707 DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars; 1708 1709 /// PHINodes of the reductions that should be expanded in-loop along with 1710 /// their associated chains of reduction operations, in program order from top 1711 /// (PHI) to bottom 1712 ReductionChainMap InLoopReductionChains; 1713 1714 /// A Map of inloop reduction operations and their immediate chain operand. 1715 /// FIXME: This can be removed once reductions can be costed correctly in 1716 /// vplan. This was added to allow quick lookup to the inloop operations, 1717 /// without having to loop through InLoopReductionChains. 1718 DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains; 1719 1720 /// Returns the expected difference in cost from scalarizing the expression 1721 /// feeding a predicated instruction \p PredInst. The instructions to 1722 /// scalarize and their scalar costs are collected in \p ScalarCosts. A 1723 /// non-negative return value implies the expression will be scalarized. 1724 /// Currently, only single-use chains are considered for scalarization. 1725 int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts, 1726 ElementCount VF); 1727 1728 /// Collect the instructions that are uniform after vectorization. An 1729 /// instruction is uniform if we represent it with a single scalar value in 1730 /// the vectorized loop corresponding to each vector iteration. Examples of 1731 /// uniform instructions include pointer operands of consecutive or 1732 /// interleaved memory accesses. Note that although uniformity implies an 1733 /// instruction will be scalar, the reverse is not true. In general, a 1734 /// scalarized instruction will be represented by VF scalar values in the 1735 /// vectorized loop, each corresponding to an iteration of the original 1736 /// scalar loop. 1737 void collectLoopUniforms(ElementCount VF); 1738 1739 /// Collect the instructions that are scalar after vectorization. An 1740 /// instruction is scalar if it is known to be uniform or will be scalarized 1741 /// during vectorization. collectLoopScalars should only add non-uniform nodes 1742 /// to the list if they are used by a load/store instruction that is marked as 1743 /// CM_Scalarize. Non-uniform scalarized instructions will be represented by 1744 /// VF values in the vectorized loop, each corresponding to an iteration of 1745 /// the original scalar loop. 1746 void collectLoopScalars(ElementCount VF); 1747 1748 /// Keeps cost model vectorization decision and cost for instructions. 1749 /// Right now it is used for memory instructions only. 1750 using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>, 1751 std::pair<InstWidening, InstructionCost>>; 1752 1753 DecisionList WideningDecisions; 1754 1755 /// Returns true if \p V is expected to be vectorized and it needs to be 1756 /// extracted. 1757 bool needsExtract(Value *V, ElementCount VF) const { 1758 Instruction *I = dyn_cast<Instruction>(V); 1759 if (VF.isScalar() || !I || !TheLoop->contains(I) || 1760 TheLoop->isLoopInvariant(I)) 1761 return false; 1762 1763 // Assume we can vectorize V (and hence we need extraction) if the 1764 // scalars are not computed yet. This can happen, because it is called 1765 // via getScalarizationOverhead from setCostBasedWideningDecision, before 1766 // the scalars are collected. That should be a safe assumption in most 1767 // cases, because we check if the operands have vectorizable types 1768 // beforehand in LoopVectorizationLegality. 
1769 return Scalars.find(VF) == Scalars.end() || 1770 !isScalarAfterVectorization(I, VF); 1771 }; 1772 1773 /// Returns a range containing only operands needing to be extracted. 1774 SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops, 1775 ElementCount VF) const { 1776 return SmallVector<Value *, 4>(make_filter_range( 1777 Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); })); 1778 } 1779 1780 /// Determines if we have the infrastructure to vectorize loop \p L and its 1781 /// epilogue, assuming the main loop is vectorized by \p VF. 1782 bool isCandidateForEpilogueVectorization(const Loop &L, 1783 const ElementCount VF) const; 1784 1785 /// Returns true if epilogue vectorization is considered profitable, and 1786 /// false otherwise. 1787 /// \p VF is the vectorization factor chosen for the original loop. 1788 bool isEpilogueVectorizationProfitable(const ElementCount VF) const; 1789 1790 public: 1791 /// The loop that we evaluate. 1792 Loop *TheLoop; 1793 1794 /// Predicated scalar evolution analysis. 1795 PredicatedScalarEvolution &PSE; 1796 1797 /// Loop Info analysis. 1798 LoopInfo *LI; 1799 1800 /// Vectorization legality. 1801 LoopVectorizationLegality *Legal; 1802 1803 /// Vector target information. 1804 const TargetTransformInfo &TTI; 1805 1806 /// Target Library Info. 1807 const TargetLibraryInfo *TLI; 1808 1809 /// Demanded bits analysis. 1810 DemandedBits *DB; 1811 1812 /// Assumption cache. 1813 AssumptionCache *AC; 1814 1815 /// Interface to emit optimization remarks. 1816 OptimizationRemarkEmitter *ORE; 1817 1818 const Function *TheFunction; 1819 1820 /// Loop Vectorize Hint. 1821 const LoopVectorizeHints *Hints; 1822 1823 /// The interleave access information contains groups of interleaved accesses 1824 /// with the same stride and close to each other. 1825 InterleavedAccessInfo &InterleaveInfo; 1826 1827 /// Values to ignore in the cost model. 1828 SmallPtrSet<const Value *, 16> ValuesToIgnore; 1829 1830 /// Values to ignore in the cost model when VF > 1. 1831 SmallPtrSet<const Value *, 16> VecValuesToIgnore; 1832 1833 /// All element types found in the loop. 1834 SmallPtrSet<Type *, 16> ElementTypesInLoop; 1835 1836 /// Profitable vector factors. 1837 SmallVector<VectorizationFactor, 8> ProfitableVFs; 1838 }; 1839 } // end namespace llvm 1840 1841 /// Helper struct to manage generating runtime checks for vectorization. 1842 /// 1843 /// The runtime checks are created up-front in temporary blocks to allow better 1844 /// estimating the cost and un-linked from the existing IR. After deciding to 1845 /// vectorize, the checks are moved back. If deciding not to vectorize, the 1846 /// temporary blocks are completely removed. 1847 class GeneratedRTChecks { 1848 /// Basic block which contains the generated SCEV checks, if any. 1849 BasicBlock *SCEVCheckBlock = nullptr; 1850 1851 /// The value representing the result of the generated SCEV checks. If it is 1852 /// nullptr, either no SCEV checks have been generated or they have been used. 1853 Value *SCEVCheckCond = nullptr; 1854 1855 /// Basic block which contains the generated memory runtime checks, if any. 1856 BasicBlock *MemCheckBlock = nullptr; 1857 1858 /// The value representing the result of the generated memory runtime checks. 1859 /// If it is nullptr, either no memory runtime checks have been generated or 1860 /// they have been used. 
1861 Value *MemRuntimeCheckCond = nullptr;
1862
1863 DominatorTree *DT;
1864 LoopInfo *LI;
1865 TargetTransformInfo *TTI;
1866
1867 SCEVExpander SCEVExp;
1868 SCEVExpander MemCheckExp;
1869
1870 bool CostTooHigh = false;
1871
1872 public:
1873 GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
1874 TargetTransformInfo *TTI, const DataLayout &DL)
1875 : DT(DT), LI(LI), TTI(TTI), SCEVExp(SE, DL, "scev.check"),
1876 MemCheckExp(SE, DL, "scev.check") {}
1877
1878 /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
1879 /// accurately estimate the cost of the runtime checks. The blocks are
1880 /// un-linked from the IR and are added back during vector code generation. If
1881 /// there is no vector code generation, the check blocks are removed
1882 /// completely.
1883 void Create(Loop *L, const LoopAccessInfo &LAI,
1884 const SCEVPredicate &UnionPred, ElementCount VF, unsigned IC) {
1885
1886 // Hard cutoff to limit compile-time increase in case a very large number of
1887 // runtime checks needs to be generated.
1888 // TODO: Skip cutoff if the loop is guaranteed to execute, e.g. due to
1889 // profile info.
1890 CostTooHigh =
1891 LAI.getNumRuntimePointerChecks() > VectorizeMemoryCheckThreshold;
1892 if (CostTooHigh)
1893 return;
1894
1895 BasicBlock *LoopHeader = L->getHeader();
1896 BasicBlock *Preheader = L->getLoopPreheader();
1897
1898 // Use SplitBlock to create blocks for SCEV & memory runtime checks to
1899 // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
1900 // may be used by SCEVExpander. The blocks will be un-linked from their
1901 // predecessors and removed from LI & DT at the end of the function.
1902 if (!UnionPred.isAlwaysTrue()) {
1903 SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
1904 nullptr, "vector.scevcheck");
1905
1906 SCEVCheckCond = SCEVExp.expandCodeForPredicate(
1907 &UnionPred, SCEVCheckBlock->getTerminator());
1908 }
1909
1910 const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
1911 if (RtPtrChecking.Need) {
1912 auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
1913 MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
1914 "vector.memcheck");
1915
1916 auto DiffChecks = RtPtrChecking.getDiffChecks();
1917 if (DiffChecks) {
1918 Value *RuntimeVF = nullptr;
1919 MemRuntimeCheckCond = addDiffRuntimeChecks(
1920 MemCheckBlock->getTerminator(), L, *DiffChecks, MemCheckExp,
1921 [VF, &RuntimeVF](IRBuilderBase &B, unsigned Bits) {
1922 if (!RuntimeVF)
1923 RuntimeVF = getRuntimeVF(B, B.getIntNTy(Bits), VF);
1924 return RuntimeVF;
1925 },
1926 IC);
1927 } else {
1928 MemRuntimeCheckCond =
1929 addRuntimeChecks(MemCheckBlock->getTerminator(), L,
1930 RtPtrChecking.getChecks(), MemCheckExp);
1931 }
1932 assert(MemRuntimeCheckCond &&
1933 "no RT checks generated although RtPtrChecking "
1934 "claimed checks are required");
1935 }
1936
1937 if (!MemCheckBlock && !SCEVCheckBlock)
1938 return;
1939
1940 // Unhook the temporary blocks with the checks and update various places
1941 // accordingly.
1942 if (SCEVCheckBlock) 1943 SCEVCheckBlock->replaceAllUsesWith(Preheader); 1944 if (MemCheckBlock) 1945 MemCheckBlock->replaceAllUsesWith(Preheader); 1946 1947 if (SCEVCheckBlock) { 1948 SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1949 new UnreachableInst(Preheader->getContext(), SCEVCheckBlock); 1950 Preheader->getTerminator()->eraseFromParent(); 1951 } 1952 if (MemCheckBlock) { 1953 MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator()); 1954 new UnreachableInst(Preheader->getContext(), MemCheckBlock); 1955 Preheader->getTerminator()->eraseFromParent(); 1956 } 1957 1958 DT->changeImmediateDominator(LoopHeader, Preheader); 1959 if (MemCheckBlock) { 1960 DT->eraseNode(MemCheckBlock); 1961 LI->removeBlock(MemCheckBlock); 1962 } 1963 if (SCEVCheckBlock) { 1964 DT->eraseNode(SCEVCheckBlock); 1965 LI->removeBlock(SCEVCheckBlock); 1966 } 1967 } 1968 1969 InstructionCost getCost() { 1970 if (SCEVCheckBlock || MemCheckBlock) 1971 LLVM_DEBUG(dbgs() << "Calculating cost of runtime checks:\n"); 1972 1973 if (CostTooHigh) { 1974 InstructionCost Cost; 1975 Cost.setInvalid(); 1976 LLVM_DEBUG(dbgs() << " number of checks exceeded threshold\n"); 1977 return Cost; 1978 } 1979 1980 InstructionCost RTCheckCost = 0; 1981 if (SCEVCheckBlock) 1982 for (Instruction &I : *SCEVCheckBlock) { 1983 if (SCEVCheckBlock->getTerminator() == &I) 1984 continue; 1985 InstructionCost C = 1986 TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput); 1987 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n"); 1988 RTCheckCost += C; 1989 } 1990 if (MemCheckBlock) 1991 for (Instruction &I : *MemCheckBlock) { 1992 if (MemCheckBlock->getTerminator() == &I) 1993 continue; 1994 InstructionCost C = 1995 TTI->getInstructionCost(&I, TTI::TCK_RecipThroughput); 1996 LLVM_DEBUG(dbgs() << " " << C << " for " << I << "\n"); 1997 RTCheckCost += C; 1998 } 1999 2000 if (SCEVCheckBlock || MemCheckBlock) 2001 LLVM_DEBUG(dbgs() << "Total cost of runtime checks: " << RTCheckCost 2002 << "\n"); 2003 2004 return RTCheckCost; 2005 } 2006 2007 /// Remove the created SCEV & memory runtime check blocks & instructions, if 2008 /// unused. 2009 ~GeneratedRTChecks() { 2010 SCEVExpanderCleaner SCEVCleaner(SCEVExp); 2011 SCEVExpanderCleaner MemCheckCleaner(MemCheckExp); 2012 if (!SCEVCheckCond) 2013 SCEVCleaner.markResultUsed(); 2014 2015 if (!MemRuntimeCheckCond) 2016 MemCheckCleaner.markResultUsed(); 2017 2018 if (MemRuntimeCheckCond) { 2019 auto &SE = *MemCheckExp.getSE(); 2020 // Memory runtime check generation creates compares that use expanded 2021 // values. Remove them before running the SCEVExpanderCleaners. 2022 for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) { 2023 if (MemCheckExp.isInsertedInstruction(&I)) 2024 continue; 2025 SE.forgetValue(&I); 2026 I.eraseFromParent(); 2027 } 2028 } 2029 MemCheckCleaner.cleanup(); 2030 SCEVCleaner.cleanup(); 2031 2032 if (SCEVCheckCond) 2033 SCEVCheckBlock->eraseFromParent(); 2034 if (MemRuntimeCheckCond) 2035 MemCheckBlock->eraseFromParent(); 2036 } 2037 2038 /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and 2039 /// adjusts the branches to branch to the vector preheader or \p Bypass, 2040 /// depending on the generated condition. 2041 BasicBlock *emitSCEVChecks(BasicBlock *Bypass, 2042 BasicBlock *LoopVectorPreHeader, 2043 BasicBlock *LoopExitBlock) { 2044 if (!SCEVCheckCond) 2045 return nullptr; 2046 2047 Value *Cond = SCEVCheckCond; 2048 // Mark the check as used, to prevent it from being removed during cleanup. 
2049 SCEVCheckCond = nullptr; 2050 if (auto *C = dyn_cast<ConstantInt>(Cond)) 2051 if (C->isZero()) 2052 return nullptr; 2053 2054 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2055 2056 BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock); 2057 // Create new preheader for vector loop. 2058 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2059 PL->addBasicBlockToLoop(SCEVCheckBlock, *LI); 2060 2061 SCEVCheckBlock->getTerminator()->eraseFromParent(); 2062 SCEVCheckBlock->moveBefore(LoopVectorPreHeader); 2063 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2064 SCEVCheckBlock); 2065 2066 DT->addNewBlock(SCEVCheckBlock, Pred); 2067 DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock); 2068 2069 ReplaceInstWithInst(SCEVCheckBlock->getTerminator(), 2070 BranchInst::Create(Bypass, LoopVectorPreHeader, Cond)); 2071 return SCEVCheckBlock; 2072 } 2073 2074 /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts 2075 /// the branches to branch to the vector preheader or \p Bypass, depending on 2076 /// the generated condition. 2077 BasicBlock *emitMemRuntimeChecks(BasicBlock *Bypass, 2078 BasicBlock *LoopVectorPreHeader) { 2079 // Check if we generated code that checks in runtime if arrays overlap. 2080 if (!MemRuntimeCheckCond) 2081 return nullptr; 2082 2083 auto *Pred = LoopVectorPreHeader->getSinglePredecessor(); 2084 Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader, 2085 MemCheckBlock); 2086 2087 DT->addNewBlock(MemCheckBlock, Pred); 2088 DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock); 2089 MemCheckBlock->moveBefore(LoopVectorPreHeader); 2090 2091 if (auto *PL = LI->getLoopFor(LoopVectorPreHeader)) 2092 PL->addBasicBlockToLoop(MemCheckBlock, *LI); 2093 2094 ReplaceInstWithInst( 2095 MemCheckBlock->getTerminator(), 2096 BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond)); 2097 MemCheckBlock->getTerminator()->setDebugLoc( 2098 Pred->getTerminator()->getDebugLoc()); 2099 2100 // Mark the check as used, to prevent it from being removed during cleanup. 2101 MemRuntimeCheckCond = nullptr; 2102 return MemCheckBlock; 2103 } 2104 }; 2105 2106 // Return true if \p OuterLp is an outer loop annotated with hints for explicit 2107 // vectorization. The loop needs to be annotated with #pragma omp simd 2108 // simdlen(#) or #pragma clang vectorize(enable) vectorize_width(#). If the 2109 // vector length information is not provided, vectorization is not considered 2110 // explicit. Interleave hints are not allowed either. These limitations will be 2111 // relaxed in the future. 2112 // Please, note that we are currently forced to abuse the pragma 'clang 2113 // vectorize' semantics. This pragma provides *auto-vectorization hints* 2114 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd' 2115 // provides *explicit vectorization hints* (LV can bypass legal checks and 2116 // assume that vectorization is legal). However, both hints are implemented 2117 // using the same metadata (llvm.loop.vectorize, processed by 2118 // LoopVectorizeHints). This will be fixed in the future when the native IR 2119 // representation for pragma 'omp simd' is introduced. 2120 static bool isExplicitVecOuterLoop(Loop *OuterLp, 2121 OptimizationRemarkEmitter *ORE) { 2122 assert(!OuterLp->isInnermost() && "This is not an outer loop"); 2123 LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE); 2124 2125 // Only outer loops with an explicit vectorization hint are supported. 
2126 // Unannotated outer loops are ignored. 2127 if (Hints.getForce() == LoopVectorizeHints::FK_Undefined) 2128 return false; 2129 2130 Function *Fn = OuterLp->getHeader()->getParent(); 2131 if (!Hints.allowVectorization(Fn, OuterLp, 2132 true /*VectorizeOnlyWhenForced*/)) { 2133 LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n"); 2134 return false; 2135 } 2136 2137 if (Hints.getInterleave() > 1) { 2138 // TODO: Interleave support is future work. 2139 LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for " 2140 "outer loops.\n"); 2141 Hints.emitRemarkWithHints(); 2142 return false; 2143 } 2144 2145 return true; 2146 } 2147 2148 static void collectSupportedLoops(Loop &L, LoopInfo *LI, 2149 OptimizationRemarkEmitter *ORE, 2150 SmallVectorImpl<Loop *> &V) { 2151 // Collect inner loops and outer loops without irreducible control flow. For 2152 // now, only collect outer loops that have explicit vectorization hints. If we 2153 // are stress testing the VPlan H-CFG construction, we collect the outermost 2154 // loop of every loop nest. 2155 if (L.isInnermost() || VPlanBuildStressTest || 2156 (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) { 2157 LoopBlocksRPO RPOT(&L); 2158 RPOT.perform(LI); 2159 if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) { 2160 V.push_back(&L); 2161 // TODO: Collect inner loops inside marked outer loops in case 2162 // vectorization fails for the outer loop. Do not invoke 2163 // 'containsIrreducibleCFG' again for inner loops when the outer loop is 2164 // already known to be reducible. We can use an inherited attribute for 2165 // that. 2166 return; 2167 } 2168 } 2169 for (Loop *InnerL : L) 2170 collectSupportedLoops(*InnerL, LI, ORE, V); 2171 } 2172 2173 namespace { 2174 2175 /// The LoopVectorize Pass. 2176 struct LoopVectorize : public FunctionPass { 2177 /// Pass identification, replacement for typeid 2178 static char ID; 2179 2180 LoopVectorizePass Impl; 2181 2182 explicit LoopVectorize(bool InterleaveOnlyWhenForced = false, 2183 bool VectorizeOnlyWhenForced = false) 2184 : FunctionPass(ID), 2185 Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) { 2186 initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); 2187 } 2188 2189 bool runOnFunction(Function &F) override { 2190 if (skipFunction(F)) 2191 return false; 2192 2193 auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE(); 2194 auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); 2195 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2196 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2197 auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI(); 2198 auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 2199 auto *TLI = TLIP ? 
&TLIP->getTLI(F) : nullptr; 2200 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults(); 2201 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2202 auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>(); 2203 auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits(); 2204 auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(); 2205 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2206 2207 std::function<const LoopAccessInfo &(Loop &)> GetLAA = 2208 [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); }; 2209 2210 return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC, 2211 GetLAA, *ORE, PSI).MadeAnyChange; 2212 } 2213 2214 void getAnalysisUsage(AnalysisUsage &AU) const override { 2215 AU.addRequired<AssumptionCacheTracker>(); 2216 AU.addRequired<BlockFrequencyInfoWrapperPass>(); 2217 AU.addRequired<DominatorTreeWrapperPass>(); 2218 AU.addRequired<LoopInfoWrapperPass>(); 2219 AU.addRequired<ScalarEvolutionWrapperPass>(); 2220 AU.addRequired<TargetTransformInfoWrapperPass>(); 2221 AU.addRequired<AAResultsWrapperPass>(); 2222 AU.addRequired<LoopAccessLegacyAnalysis>(); 2223 AU.addRequired<DemandedBitsWrapperPass>(); 2224 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2225 AU.addRequired<InjectTLIMappingsLegacy>(); 2226 2227 // We currently do not preserve loopinfo/dominator analyses with outer loop 2228 // vectorization. Until this is addressed, mark these analyses as preserved 2229 // only for non-VPlan-native path. 2230 // TODO: Preserve Loop and Dominator analyses for VPlan-native path. 2231 if (!EnableVPlanNativePath) { 2232 AU.addPreserved<LoopInfoWrapperPass>(); 2233 AU.addPreserved<DominatorTreeWrapperPass>(); 2234 } 2235 2236 AU.addPreserved<BasicAAWrapperPass>(); 2237 AU.addPreserved<GlobalsAAWrapperPass>(); 2238 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 2239 } 2240 }; 2241 2242 } // end anonymous namespace 2243 2244 //===----------------------------------------------------------------------===// 2245 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and 2246 // LoopVectorizationCostModel and LoopVectorizationPlanner. 2247 //===----------------------------------------------------------------------===// 2248 2249 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) { 2250 // We need to place the broadcast of invariant variables outside the loop, 2251 // but only if it's proven safe to do so. Else, broadcast will be inside 2252 // vector loop body. 2253 Instruction *Instr = dyn_cast<Instruction>(V); 2254 bool SafeToHoist = OrigLoop->isLoopInvariant(V) && 2255 (!Instr || 2256 DT->dominates(Instr->getParent(), LoopVectorPreHeader)); 2257 // Place the code for broadcasting invariant variables in the new preheader. 2258 IRBuilder<>::InsertPointGuard Guard(Builder); 2259 if (SafeToHoist) 2260 Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator()); 2261 2262 // Broadcast the scalar into all locations in the vector. 2263 Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast"); 2264 2265 return Shuf; 2266 } 2267 2268 /// This function adds 2269 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...) 2270 /// to each vector element of Val. The sequence starts at StartIndex. 2271 /// \p Opcode is relevant for FP induction variable. 
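/// For instance (an illustrative integer case): with VF = 4, StartIdx = 0 and
/// Step = 2, a splat input Val = <%x, %x, %x, %x> produces
///   <%x + 0, %x + 2, %x + 4, %x + 6>
/// i.e. Val + (<0, 1, 2, 3> + StartIdx) * Step.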
2272 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step, 2273 Instruction::BinaryOps BinOp, ElementCount VF, 2274 IRBuilderBase &Builder) { 2275 assert(VF.isVector() && "only vector VFs are supported"); 2276 2277 // Create and check the types. 2278 auto *ValVTy = cast<VectorType>(Val->getType()); 2279 ElementCount VLen = ValVTy->getElementCount(); 2280 2281 Type *STy = Val->getType()->getScalarType(); 2282 assert((STy->isIntegerTy() || STy->isFloatingPointTy()) && 2283 "Induction Step must be an integer or FP"); 2284 assert(Step->getType() == STy && "Step has wrong type"); 2285 2286 SmallVector<Constant *, 8> Indices; 2287 2288 // Create a vector of consecutive numbers from zero to VF. 2289 VectorType *InitVecValVTy = ValVTy; 2290 if (STy->isFloatingPointTy()) { 2291 Type *InitVecValSTy = 2292 IntegerType::get(STy->getContext(), STy->getScalarSizeInBits()); 2293 InitVecValVTy = VectorType::get(InitVecValSTy, VLen); 2294 } 2295 Value *InitVec = Builder.CreateStepVector(InitVecValVTy); 2296 2297 // Splat the StartIdx 2298 Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx); 2299 2300 if (STy->isIntegerTy()) { 2301 InitVec = Builder.CreateAdd(InitVec, StartIdxSplat); 2302 Step = Builder.CreateVectorSplat(VLen, Step); 2303 assert(Step->getType() == Val->getType() && "Invalid step vec"); 2304 // FIXME: The newly created binary instructions should contain nsw/nuw 2305 // flags, which can be found from the original scalar operations. 2306 Step = Builder.CreateMul(InitVec, Step); 2307 return Builder.CreateAdd(Val, Step, "induction"); 2308 } 2309 2310 // Floating point induction. 2311 assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) && 2312 "Binary Opcode should be specified for FP induction"); 2313 InitVec = Builder.CreateUIToFP(InitVec, ValVTy); 2314 InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat); 2315 2316 Step = Builder.CreateVectorSplat(VLen, Step); 2317 Value *MulOp = Builder.CreateFMul(InitVec, Step); 2318 return Builder.CreateBinOp(BinOp, Val, MulOp, "induction"); 2319 } 2320 2321 /// Compute scalar induction steps. \p ScalarIV is the scalar induction 2322 /// variable on which to base the steps, \p Step is the size of the step. 2323 static void buildScalarSteps(Value *ScalarIV, Value *Step, 2324 const InductionDescriptor &ID, VPValue *Def, 2325 VPTransformState &State) { 2326 IRBuilderBase &Builder = State.Builder; 2327 // We shouldn't have to build scalar steps if we aren't vectorizing. 2328 assert(State.VF.isVector() && "VF should be greater than one"); 2329 // Get the value type and ensure it and the step have the same integer type. 2330 Type *ScalarIVTy = ScalarIV->getType()->getScalarType(); 2331 assert(ScalarIVTy == Step->getType() && 2332 "Val and Step should have the same type"); 2333 2334 // We build scalar steps for both integer and floating-point induction 2335 // variables. Here, we determine the kind of arithmetic we will perform. 2336 Instruction::BinaryOps AddOp; 2337 Instruction::BinaryOps MulOp; 2338 if (ScalarIVTy->isIntegerTy()) { 2339 AddOp = Instruction::Add; 2340 MulOp = Instruction::Mul; 2341 } else { 2342 AddOp = ID.getInductionOpcode(); 2343 MulOp = Instruction::FMul; 2344 } 2345 2346 // Determine the number of scalars we need to generate for each unroll 2347 // iteration. 2348 bool FirstLaneOnly = vputils::onlyFirstLaneUsed(Def); 2349 unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue(); 2350 // Compute the scalar steps and save the results in State. 
2351 Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(), 2352 ScalarIVTy->getScalarSizeInBits()); 2353 Type *VecIVTy = nullptr; 2354 Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr; 2355 if (!FirstLaneOnly && State.VF.isScalable()) { 2356 VecIVTy = VectorType::get(ScalarIVTy, State.VF); 2357 UnitStepVec = 2358 Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF)); 2359 SplatStep = Builder.CreateVectorSplat(State.VF, Step); 2360 SplatIV = Builder.CreateVectorSplat(State.VF, ScalarIV); 2361 } 2362 2363 for (unsigned Part = 0; Part < State.UF; ++Part) { 2364 Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part); 2365 2366 if (!FirstLaneOnly && State.VF.isScalable()) { 2367 auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0); 2368 auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec); 2369 if (ScalarIVTy->isFloatingPointTy()) 2370 InitVec = Builder.CreateSIToFP(InitVec, VecIVTy); 2371 auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep); 2372 auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul); 2373 State.set(Def, Add, Part); 2374 // It's useful to record the lane values too for the known minimum number 2375 // of elements so we do those below. This improves the code quality when 2376 // trying to extract the first element, for example. 2377 } 2378 2379 if (ScalarIVTy->isFloatingPointTy()) 2380 StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy); 2381 2382 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 2383 Value *StartIdx = Builder.CreateBinOp( 2384 AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane)); 2385 // The step returned by `createStepForVF` is a runtime-evaluated value 2386 // when VF is scalable. Otherwise, it should be folded into a Constant. 2387 assert((State.VF.isScalable() || isa<Constant>(StartIdx)) && 2388 "Expected StartIdx to be folded to a constant when VF is not " 2389 "scalable"); 2390 auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step); 2391 auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul); 2392 State.set(Def, Add, VPIteration(Part, Lane)); 2393 } 2394 } 2395 } 2396 2397 // Generate code for the induction step. Note that induction steps are 2398 // required to be loop-invariant 2399 static Value *CreateStepValue(const SCEV *Step, ScalarEvolution &SE, 2400 Instruction *InsertBefore, 2401 Loop *OrigLoop = nullptr) { 2402 const DataLayout &DL = SE.getDataLayout(); 2403 assert((!OrigLoop || SE.isLoopInvariant(Step, OrigLoop)) && 2404 "Induction step should be loop invariant"); 2405 if (auto *E = dyn_cast<SCEVUnknown>(Step)) 2406 return E->getValue(); 2407 2408 SCEVExpander Exp(SE, DL, "induction"); 2409 return Exp.expandCodeFor(Step, Step->getType(), InsertBefore); 2410 } 2411 2412 /// Compute the transformed value of Index at offset StartValue using step 2413 /// StepValue. 2414 /// For integer induction, returns StartValue + Index * StepValue. 2415 /// For pointer induction, returns StartValue[Index * StepValue]. 2416 /// FIXME: The newly created binary instructions should contain nsw/nuw 2417 /// flags, which can be found from the original scalar operations. 2418 static Value *emitTransformedIndex(IRBuilderBase &B, Value *Index, 2419 Value *StartValue, Value *Step, 2420 const InductionDescriptor &ID) { 2421 assert(Index->getType()->getScalarType() == Step->getType() && 2422 "Index scalar type does not match StepValue type"); 2423 2424 // Note: the IR at this point is broken. 
We cannot use SE to create any new 2425 // SCEV and then expand it, hoping that SCEV's simplification will give us 2426 // a more optimal code. Unfortunately, attempt of doing so on invalid IR may 2427 // lead to various SCEV crashes. So all we can do is to use builder and rely 2428 // on InstCombine for future simplifications. Here we handle some trivial 2429 // cases only. 2430 auto CreateAdd = [&B](Value *X, Value *Y) { 2431 assert(X->getType() == Y->getType() && "Types don't match!"); 2432 if (auto *CX = dyn_cast<ConstantInt>(X)) 2433 if (CX->isZero()) 2434 return Y; 2435 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2436 if (CY->isZero()) 2437 return X; 2438 return B.CreateAdd(X, Y); 2439 }; 2440 2441 // We allow X to be a vector type, in which case Y will potentially be 2442 // splatted into a vector with the same element count. 2443 auto CreateMul = [&B](Value *X, Value *Y) { 2444 assert(X->getType()->getScalarType() == Y->getType() && 2445 "Types don't match!"); 2446 if (auto *CX = dyn_cast<ConstantInt>(X)) 2447 if (CX->isOne()) 2448 return Y; 2449 if (auto *CY = dyn_cast<ConstantInt>(Y)) 2450 if (CY->isOne()) 2451 return X; 2452 VectorType *XVTy = dyn_cast<VectorType>(X->getType()); 2453 if (XVTy && !isa<VectorType>(Y->getType())) 2454 Y = B.CreateVectorSplat(XVTy->getElementCount(), Y); 2455 return B.CreateMul(X, Y); 2456 }; 2457 2458 switch (ID.getKind()) { 2459 case InductionDescriptor::IK_IntInduction: { 2460 assert(!isa<VectorType>(Index->getType()) && 2461 "Vector indices not supported for integer inductions yet"); 2462 assert(Index->getType() == StartValue->getType() && 2463 "Index type does not match StartValue type"); 2464 if (isa<ConstantInt>(Step) && cast<ConstantInt>(Step)->isMinusOne()) 2465 return B.CreateSub(StartValue, Index); 2466 auto *Offset = CreateMul(Index, Step); 2467 return CreateAdd(StartValue, Offset); 2468 } 2469 case InductionDescriptor::IK_PtrInduction: { 2470 assert(isa<Constant>(Step) && 2471 "Expected constant step for pointer induction"); 2472 return B.CreateGEP(ID.getElementType(), StartValue, CreateMul(Index, Step)); 2473 } 2474 case InductionDescriptor::IK_FpInduction: { 2475 assert(!isa<VectorType>(Index->getType()) && 2476 "Vector indices not supported for FP inductions yet"); 2477 assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value"); 2478 auto InductionBinOp = ID.getInductionBinOp(); 2479 assert(InductionBinOp && 2480 (InductionBinOp->getOpcode() == Instruction::FAdd || 2481 InductionBinOp->getOpcode() == Instruction::FSub) && 2482 "Original bin op should be defined for FP induction"); 2483 2484 Value *MulExp = B.CreateFMul(Step, Index); 2485 return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp, 2486 "induction"); 2487 } 2488 case InductionDescriptor::IK_NoInduction: 2489 return nullptr; 2490 } 2491 llvm_unreachable("invalid enum"); 2492 } 2493 2494 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def, 2495 const VPIteration &Instance, 2496 VPTransformState &State) { 2497 Value *ScalarInst = State.get(Def, Instance); 2498 Value *VectorValue = State.get(Def, Instance.Part); 2499 VectorValue = Builder.CreateInsertElement( 2500 VectorValue, ScalarInst, 2501 Instance.Lane.getAsRuntimeExpr(State.Builder, VF)); 2502 State.set(Def, VectorValue, Instance.Part); 2503 } 2504 2505 // Return whether we allow using masked interleave-groups (for dealing with 2506 // strided loads/stores that reside in predicated blocks, or for dealing 2507 // with gaps). 
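// For instance (an illustrative case): in
//   for (i = 0; i < N; i++) {
//     if (Cond[i])
//       Sum += A[2 * i]; // Strided load inside a predicated block.
//   }
// the load of A[2 * i] forms an interleave group of factor 2 with a gap at
// member index 1, so widening it needs a masked wide load both for the block
// predicate and for the gap.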
2508 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) { 2509 // If an override option has been passed in for interleaved accesses, use it. 2510 if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0) 2511 return EnableMaskedInterleavedMemAccesses; 2512 2513 return TTI.enableMaskedInterleavedAccessVectorization(); 2514 } 2515 2516 // Try to vectorize the interleave group that \p Instr belongs to. 2517 // 2518 // E.g. Translate following interleaved load group (factor = 3): 2519 // for (i = 0; i < N; i+=3) { 2520 // R = Pic[i]; // Member of index 0 2521 // G = Pic[i+1]; // Member of index 1 2522 // B = Pic[i+2]; // Member of index 2 2523 // ... // do something to R, G, B 2524 // } 2525 // To: 2526 // %wide.vec = load <12 x i32> ; Read 4 tuples of R,G,B 2527 // %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9> ; R elements 2528 // %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10> ; G elements 2529 // %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11> ; B elements 2530 // 2531 // Or translate following interleaved store group (factor = 3): 2532 // for (i = 0; i < N; i+=3) { 2533 // ... do something to R, G, B 2534 // Pic[i] = R; // Member of index 0 2535 // Pic[i+1] = G; // Member of index 1 2536 // Pic[i+2] = B; // Member of index 2 2537 // } 2538 // To: 2539 // %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7> 2540 // %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u> 2541 // %interleaved.vec = shuffle %R_G.vec, %B_U.vec, 2542 // <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; Interleave R,G,B elements 2543 // store <12 x i32> %interleaved.vec ; Write 4 tuples of R,G,B 2544 void InnerLoopVectorizer::vectorizeInterleaveGroup( 2545 const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs, 2546 VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues, 2547 VPValue *BlockInMask) { 2548 Instruction *Instr = Group->getInsertPos(); 2549 const DataLayout &DL = Instr->getModule()->getDataLayout(); 2550 2551 // Prepare for the vector type of the interleaved load/store. 2552 Type *ScalarTy = getLoadStoreType(Instr); 2553 unsigned InterleaveFactor = Group->getFactor(); 2554 assert(!VF.isScalable() && "scalable vectors not yet supported."); 2555 auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor); 2556 2557 // Prepare for the new pointers. 2558 SmallVector<Value *, 2> AddrParts; 2559 unsigned Index = Group->getIndex(Instr); 2560 2561 // TODO: extend the masked interleaved-group support to reversed access. 2562 assert((!BlockInMask || !Group->isReverse()) && 2563 "Reversed masked interleave-group not supported."); 2564 2565 // If the group is reverse, adjust the index to refer to the last vector lane 2566 // instead of the first. We adjust the index from the first vector lane, 2567 // rather than directly getting the pointer for lane VF - 1, because the 2568 // pointer operand of the interleaved access is supposed to be uniform. For 2569 // uniform instructions, we're only required to generate a value for the 2570 // first vector lane in each unroll iteration. 2571 if (Group->isReverse()) 2572 Index += (VF.getKnownMinValue() - 1) * Group->getFactor(); 2573 2574 for (unsigned Part = 0; Part < UF; Part++) { 2575 Value *AddrPart = State.get(Addr, VPIteration(Part, 0)); 2576 State.setDebugLocFromInst(AddrPart); 2577 2578 // Notice current instruction could be any index. Need to adjust the address 2579 // to the member of index 0. 2580 // 2581 // E.g. 
a = A[i+1]; // Member of index 1 (Current instruction) 2582 // b = A[i]; // Member of index 0 2583 // Current pointer is pointed to A[i+1], adjust it to A[i]. 2584 // 2585 // E.g. A[i+1] = a; // Member of index 1 2586 // A[i] = b; // Member of index 0 2587 // A[i+2] = c; // Member of index 2 (Current instruction) 2588 // Current pointer is pointed to A[i+2], adjust it to A[i]. 2589 2590 bool InBounds = false; 2591 if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts())) 2592 InBounds = gep->isInBounds(); 2593 AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index)); 2594 cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds); 2595 2596 // Cast to the vector pointer type. 2597 unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace(); 2598 Type *PtrTy = VecTy->getPointerTo(AddressSpace); 2599 AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy)); 2600 } 2601 2602 State.setDebugLocFromInst(Instr); 2603 Value *PoisonVec = PoisonValue::get(VecTy); 2604 2605 Value *MaskForGaps = nullptr; 2606 if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) { 2607 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2608 assert(MaskForGaps && "Mask for Gaps is required but it is null"); 2609 } 2610 2611 // Vectorize the interleaved load group. 2612 if (isa<LoadInst>(Instr)) { 2613 // For each unroll part, create a wide load for the group. 2614 SmallVector<Value *, 2> NewLoads; 2615 for (unsigned Part = 0; Part < UF; Part++) { 2616 Instruction *NewLoad; 2617 if (BlockInMask || MaskForGaps) { 2618 assert(useMaskedInterleavedAccesses(*TTI) && 2619 "masked interleaved groups are not allowed."); 2620 Value *GroupMask = MaskForGaps; 2621 if (BlockInMask) { 2622 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2623 Value *ShuffledMask = Builder.CreateShuffleVector( 2624 BlockInMaskPart, 2625 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2626 "interleaved.mask"); 2627 GroupMask = MaskForGaps 2628 ? Builder.CreateBinOp(Instruction::And, ShuffledMask, 2629 MaskForGaps) 2630 : ShuffledMask; 2631 } 2632 NewLoad = 2633 Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(), 2634 GroupMask, PoisonVec, "wide.masked.vec"); 2635 } 2636 else 2637 NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part], 2638 Group->getAlign(), "wide.vec"); 2639 Group->addMetadata(NewLoad); 2640 NewLoads.push_back(NewLoad); 2641 } 2642 2643 // For each member in the group, shuffle out the appropriate data from the 2644 // wide loads. 2645 unsigned J = 0; 2646 for (unsigned I = 0; I < InterleaveFactor; ++I) { 2647 Instruction *Member = Group->getMember(I); 2648 2649 // Skip the gaps in the group. 2650 if (!Member) 2651 continue; 2652 2653 auto StrideMask = 2654 createStrideMask(I, InterleaveFactor, VF.getKnownMinValue()); 2655 for (unsigned Part = 0; Part < UF; Part++) { 2656 Value *StridedVec = Builder.CreateShuffleVector( 2657 NewLoads[Part], StrideMask, "strided.vec"); 2658 2659 // If this member has different type, cast the result type. 
2660 if (Member->getType() != ScalarTy) { 2661 assert(!VF.isScalable() && "VF is assumed to be non scalable."); 2662 VectorType *OtherVTy = VectorType::get(Member->getType(), VF); 2663 StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL); 2664 } 2665 2666 if (Group->isReverse()) 2667 StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse"); 2668 2669 State.set(VPDefs[J], StridedVec, Part); 2670 } 2671 ++J; 2672 } 2673 return; 2674 } 2675 2676 // The sub vector type for current instruction. 2677 auto *SubVT = VectorType::get(ScalarTy, VF); 2678 2679 // Vectorize the interleaved store group. 2680 MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group); 2681 assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) && 2682 "masked interleaved groups are not allowed."); 2683 assert((!MaskForGaps || !VF.isScalable()) && 2684 "masking gaps for scalable vectors is not yet supported."); 2685 for (unsigned Part = 0; Part < UF; Part++) { 2686 // Collect the stored vector from each member. 2687 SmallVector<Value *, 4> StoredVecs; 2688 for (unsigned i = 0; i < InterleaveFactor; i++) { 2689 assert((Group->getMember(i) || MaskForGaps) && 2690 "Fail to get a member from an interleaved store group"); 2691 Instruction *Member = Group->getMember(i); 2692 2693 // Skip the gaps in the group. 2694 if (!Member) { 2695 Value *Undef = PoisonValue::get(SubVT); 2696 StoredVecs.push_back(Undef); 2697 continue; 2698 } 2699 2700 Value *StoredVec = State.get(StoredValues[i], Part); 2701 2702 if (Group->isReverse()) 2703 StoredVec = Builder.CreateVectorReverse(StoredVec, "reverse"); 2704 2705 // If this member has different type, cast it to a unified type. 2706 2707 if (StoredVec->getType() != SubVT) 2708 StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL); 2709 2710 StoredVecs.push_back(StoredVec); 2711 } 2712 2713 // Concatenate all vectors into a wide vector. 2714 Value *WideVec = concatenateVectors(Builder, StoredVecs); 2715 2716 // Interleave the elements in the wide vector. 2717 Value *IVec = Builder.CreateShuffleVector( 2718 WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor), 2719 "interleaved.vec"); 2720 2721 Instruction *NewStoreInstr; 2722 if (BlockInMask || MaskForGaps) { 2723 Value *GroupMask = MaskForGaps; 2724 if (BlockInMask) { 2725 Value *BlockInMaskPart = State.get(BlockInMask, Part); 2726 Value *ShuffledMask = Builder.CreateShuffleVector( 2727 BlockInMaskPart, 2728 createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()), 2729 "interleaved.mask"); 2730 GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And, 2731 ShuffledMask, MaskForGaps) 2732 : ShuffledMask; 2733 } 2734 NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part], 2735 Group->getAlign(), GroupMask); 2736 } else 2737 NewStoreInstr = 2738 Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign()); 2739 2740 Group->addMetadata(NewStoreInstr); 2741 } 2742 } 2743 2744 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, 2745 VPReplicateRecipe *RepRecipe, 2746 const VPIteration &Instance, 2747 bool IfPredicateInstr, 2748 VPTransformState &State) { 2749 assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); 2750 2751 // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for 2752 // the first lane and part. 2753 if (isa<NoAliasScopeDeclInst>(Instr)) 2754 if (!Instance.isFirstIteration()) 2755 return; 2756 2757 // Does this instruction return a value ? 
2758 bool IsVoidRetTy = Instr->getType()->isVoidTy(); 2759 2760 Instruction *Cloned = Instr->clone(); 2761 if (!IsVoidRetTy) 2762 Cloned->setName(Instr->getName() + ".cloned"); 2763 2764 // If the scalarized instruction contributes to the address computation of a 2765 // widen masked load/store which was in a basic block that needed predication 2766 // and is not predicated after vectorization, we can't propagate 2767 // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized 2768 // instruction could feed a poison value to the base address of the widen 2769 // load/store. 2770 if (State.MayGeneratePoisonRecipes.contains(RepRecipe)) 2771 Cloned->dropPoisonGeneratingFlags(); 2772 2773 if (Instr->getDebugLoc()) 2774 State.setDebugLocFromInst(Instr); 2775 2776 // Replace the operands of the cloned instructions with their scalar 2777 // equivalents in the new loop. 2778 for (auto &I : enumerate(RepRecipe->operands())) { 2779 auto InputInstance = Instance; 2780 VPValue *Operand = I.value(); 2781 VPReplicateRecipe *OperandR = dyn_cast<VPReplicateRecipe>(Operand); 2782 if (OperandR && OperandR->isUniform()) 2783 InputInstance.Lane = VPLane::getFirstLane(); 2784 Cloned->setOperand(I.index(), State.get(Operand, InputInstance)); 2785 } 2786 State.addNewMetadata(Cloned, Instr); 2787 2788 // Place the cloned scalar in the new loop. 2789 State.Builder.Insert(Cloned); 2790 2791 State.set(RepRecipe, Cloned, Instance); 2792 2793 // If we just cloned a new assumption, add it the assumption cache. 2794 if (auto *II = dyn_cast<AssumeInst>(Cloned)) 2795 AC->registerAssumption(II); 2796 2797 // End if-block. 2798 if (IfPredicateInstr) 2799 PredicatedInstructions.push_back(Cloned); 2800 } 2801 2802 Value *InnerLoopVectorizer::getOrCreateTripCount(BasicBlock *InsertBlock) { 2803 if (TripCount) 2804 return TripCount; 2805 2806 assert(InsertBlock); 2807 IRBuilder<> Builder(InsertBlock->getTerminator()); 2808 // Find the loop boundaries. 2809 ScalarEvolution *SE = PSE.getSE(); 2810 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount(); 2811 assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && 2812 "Invalid loop count"); 2813 2814 Type *IdxTy = Legal->getWidestInductionType(); 2815 assert(IdxTy && "No type for induction"); 2816 2817 // The exit count might have the type of i64 while the phi is i32. This can 2818 // happen if we have an induction variable that is sign extended before the 2819 // compare. The only way that we get a backedge taken count is that the 2820 // induction variable was signed and as such will not overflow. In such a case 2821 // truncation is legal. 2822 if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) > 2823 IdxTy->getPrimitiveSizeInBits()) 2824 BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy); 2825 BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy); 2826 2827 // Get the total trip count from the count by adding 1. 2828 const SCEV *ExitCount = SE->getAddExpr( 2829 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType())); 2830 2831 const DataLayout &DL = InsertBlock->getModule()->getDataLayout(); 2832 2833 // Expand the trip count and place the new instructions in the preheader. 2834 // Notice that the pre-header does not change, only the loop body. 2835 SCEVExpander Exp(*SE, DL, "induction"); 2836 2837 // Count holds the overall loop count (N). 
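// (A small worked example, assuming a guarded loop 'for (i = 0; i < n; ++i)':
// the backedge-taken count is n - 1, so ExitCount is (n - 1) + 1, which SCEV
// will usually fold back to n, and the expander below materializes that value
// in the preheader.)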
2838 TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(), 2839 InsertBlock->getTerminator()); 2840 2841 if (TripCount->getType()->isPointerTy()) 2842 TripCount = 2843 CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int", 2844 InsertBlock->getTerminator()); 2845 2846 return TripCount; 2847 } 2848 2849 Value * 2850 InnerLoopVectorizer::getOrCreateVectorTripCount(BasicBlock *InsertBlock) { 2851 if (VectorTripCount) 2852 return VectorTripCount; 2853 2854 Value *TC = getOrCreateTripCount(InsertBlock); 2855 IRBuilder<> Builder(InsertBlock->getTerminator()); 2856 2857 Type *Ty = TC->getType(); 2858 // This is where we can make the step a runtime constant. 2859 Value *Step = createStepForVF(Builder, Ty, VF, UF); 2860 2861 // If the tail is to be folded by masking, round the number of iterations N 2862 // up to a multiple of Step instead of rounding down. This is done by first 2863 // adding Step-1 and then rounding down. Note that it's ok if this addition 2864 // overflows: the vector induction variable will eventually wrap to zero given 2865 // that it starts at zero and its Step is a power of two; the loop will then 2866 // exit, with the last early-exit vector comparison also producing all-true. 2867 // For scalable vectors the VF is not guaranteed to be a power of 2, but this 2868 // is accounted for in emitIterationCountCheck that adds an overflow check. 2869 if (Cost->foldTailByMasking()) { 2870 assert(isPowerOf2_32(VF.getKnownMinValue() * UF) && 2871 "VF*UF must be a power of 2 when folding tail by masking"); 2872 Value *NumLanes = getRuntimeVF(Builder, Ty, VF * UF); 2873 TC = Builder.CreateAdd( 2874 TC, Builder.CreateSub(NumLanes, ConstantInt::get(Ty, 1)), "n.rnd.up"); 2875 } 2876 2877 // Now we need to generate the expression for the part of the loop that the 2878 // vectorized body will execute. This is equal to N - (N % Step) if scalar 2879 // iterations are not required for correctness, or N - Step, otherwise. Step 2880 // is equal to the vectorization factor (number of SIMD elements) times the 2881 // unroll factor (number of SIMD instructions). 2882 Value *R = Builder.CreateURem(TC, Step, "n.mod.vf"); 2883 2884 // There are cases where we *must* run at least one iteration in the remainder 2885 // loop. See the cost model for when this can happen. If the step evenly 2886 // divides the trip count, we set the remainder to be equal to the step. If 2887 // the step does not evenly divide the trip count, no adjustment is necessary 2888 // since there will already be scalar iterations. Note that the minimum 2889 // iterations check ensures that N >= Step. 2890 if (Cost->requiresScalarEpilogue(VF)) { 2891 auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0)); 2892 R = Builder.CreateSelect(IsZero, Step, R); 2893 } 2894 2895 VectorTripCount = Builder.CreateSub(TC, R, "n.vec"); 2896 2897 return VectorTripCount; 2898 } 2899 2900 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy, 2901 const DataLayout &DL) { 2902 // Verify that V is a vector type with same number of elements as DstVTy. 
2903 auto *DstFVTy = cast<FixedVectorType>(DstVTy); 2904 unsigned VF = DstFVTy->getNumElements(); 2905 auto *SrcVecTy = cast<FixedVectorType>(V->getType()); 2906 assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match"); 2907 Type *SrcElemTy = SrcVecTy->getElementType(); 2908 Type *DstElemTy = DstFVTy->getElementType(); 2909 assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) && 2910 "Vector elements must have same size"); 2911 2912 // Do a direct cast if element types are castable. 2913 if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) { 2914 return Builder.CreateBitOrPointerCast(V, DstFVTy); 2915 } 2916 // V cannot be directly casted to desired vector type. 2917 // May happen when V is a floating point vector but DstVTy is a vector of 2918 // pointers or vice-versa. Handle this using a two-step bitcast using an 2919 // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float. 2920 assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) && 2921 "Only one type should be a pointer type"); 2922 assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) && 2923 "Only one type should be a floating point type"); 2924 Type *IntTy = 2925 IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy)); 2926 auto *VecIntTy = FixedVectorType::get(IntTy, VF); 2927 Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy); 2928 return Builder.CreateBitOrPointerCast(CastVal, DstFVTy); 2929 } 2930 2931 void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) { 2932 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 2933 // Reuse existing vector loop preheader for TC checks. 2934 // Note that new preheader block is generated for vector loop. 2935 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 2936 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 2937 2938 // Generate code to check if the loop's trip count is less than VF * UF, or 2939 // equal to it in case a scalar epilogue is required; this implies that the 2940 // vector trip count is zero. This check also covers the case where adding one 2941 // to the backedge-taken count overflowed leading to an incorrect trip count 2942 // of zero. In this case we will also jump to the scalar loop. 2943 auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE 2944 : ICmpInst::ICMP_ULT; 2945 2946 // If tail is to be folded, vector loop takes care of all iterations. 2947 Type *CountTy = Count->getType(); 2948 Value *CheckMinIters = Builder.getFalse(); 2949 auto CreateStep = [&]() -> Value * { 2950 // Create step with max(MinProTripCount, UF * VF). 2951 if (UF * VF.getKnownMinValue() >= MinProfitableTripCount.getKnownMinValue()) 2952 return createStepForVF(Builder, CountTy, VF, UF); 2953 2954 Value *MinProfTC = 2955 createStepForVF(Builder, CountTy, MinProfitableTripCount, 1); 2956 if (!VF.isScalable()) 2957 return MinProfTC; 2958 return Builder.CreateBinaryIntrinsic( 2959 Intrinsic::umax, MinProfTC, createStepForVF(Builder, CountTy, VF, UF)); 2960 }; 2961 2962 if (!Cost->foldTailByMasking()) 2963 CheckMinIters = 2964 Builder.CreateICmp(P, Count, CreateStep(), "min.iters.check"); 2965 else if (VF.isScalable()) { 2966 // vscale is not necessarily a power-of-2, which means we cannot guarantee 2967 // an overflow to zero when updating induction variables and so an 2968 // additional overflow check is required before entering the vector loop. 2969 2970 // Get the maximum unsigned value for the type. 
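// (For example, with an i32 trip count this mask is 0xFFFFFFFF; the compare
// built below branches to the scalar loop whenever 0xFFFFFFFF - n < Step,
// where Step is at least VF * UF, i.e. whenever stepping the induction
// variable by VF * UF could wrap before reaching n.)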
2971 Value *MaxUIntTripCount = 2972 ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask()); 2973 Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count); 2974 2975 // Don't execute the vector loop if (UMax - n) < (VF * UF). 2976 CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep()); 2977 } 2978 2979 // Create new preheader for vector loop. 2980 LoopVectorPreHeader = 2981 SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr, 2982 "vector.ph"); 2983 2984 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 2985 DT->getNode(Bypass)->getIDom()) && 2986 "TC check is expected to dominate Bypass"); 2987 2988 // Update dominator for Bypass & LoopExit (if needed). 2989 DT->changeImmediateDominator(Bypass, TCCheckBlock); 2990 if (!Cost->requiresScalarEpilogue(VF)) 2991 // If there is an epilogue which must run, there's no edge from the 2992 // middle block to exit blocks and thus no need to update the immediate 2993 // dominator of the exit blocks. 2994 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 2995 2996 ReplaceInstWithInst( 2997 TCCheckBlock->getTerminator(), 2998 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 2999 LoopBypassBlocks.push_back(TCCheckBlock); 3000 } 3001 3002 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(BasicBlock *Bypass) { 3003 BasicBlock *const SCEVCheckBlock = 3004 RTChecks.emitSCEVChecks(Bypass, LoopVectorPreHeader, LoopExitBlock); 3005 if (!SCEVCheckBlock) 3006 return nullptr; 3007 3008 assert(!(SCEVCheckBlock->getParent()->hasOptSize() || 3009 (OptForSizeBasedOnProfile && 3010 Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) && 3011 "Cannot SCEV check stride or overflow when optimizing for size"); 3012 3013 3014 // Update dominator only if this is first RT check. 3015 if (LoopBypassBlocks.empty()) { 3016 DT->changeImmediateDominator(Bypass, SCEVCheckBlock); 3017 if (!Cost->requiresScalarEpilogue(VF)) 3018 // If there is an epilogue which must run, there's no edge from the 3019 // middle block to exit blocks and thus no need to update the immediate 3020 // dominator of the exit blocks. 3021 DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock); 3022 } 3023 3024 LoopBypassBlocks.push_back(SCEVCheckBlock); 3025 AddedSafetyChecks = true; 3026 return SCEVCheckBlock; 3027 } 3028 3029 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(BasicBlock *Bypass) { 3030 // VPlan-native path does not do any analysis for runtime checks currently. 3031 if (EnableVPlanNativePath) 3032 return nullptr; 3033 3034 BasicBlock *const MemCheckBlock = 3035 RTChecks.emitMemRuntimeChecks(Bypass, LoopVectorPreHeader); 3036 3037 // Check if we generated code that checks in runtime if arrays overlap. We put 3038 // the checks into a separate block to make the more common case of few 3039 // elements faster. 
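// (The emitted checks typically have the shape 'a.start < b.end && b.start <
// a.end' for each pair of potentially aliasing pointer ranges, branching to
// the scalar loop when the ranges may overlap; the exact form of the checks
// comes from LoopAccessAnalysis, not from this function.)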
3040 if (!MemCheckBlock) 3041 return nullptr; 3042 3043 if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) { 3044 assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled && 3045 "Cannot emit memory checks when optimizing for size, unless forced " 3046 "to vectorize."); 3047 ORE->emit([&]() { 3048 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize", 3049 OrigLoop->getStartLoc(), 3050 OrigLoop->getHeader()) 3051 << "Code-size may be reduced by not forcing " 3052 "vectorization, or by source-code modifications " 3053 "eliminating the need for runtime checks " 3054 "(e.g., adding 'restrict')."; 3055 }); 3056 } 3057 3058 LoopBypassBlocks.push_back(MemCheckBlock); 3059 3060 AddedSafetyChecks = true; 3061 3062 return MemCheckBlock; 3063 } 3064 3065 void InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) { 3066 LoopScalarBody = OrigLoop->getHeader(); 3067 LoopVectorPreHeader = OrigLoop->getLoopPreheader(); 3068 assert(LoopVectorPreHeader && "Invalid loop structure"); 3069 LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr 3070 assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) && 3071 "multiple exit loop without required epilogue?"); 3072 3073 LoopMiddleBlock = 3074 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 3075 LI, nullptr, Twine(Prefix) + "middle.block"); 3076 LoopScalarPreHeader = 3077 SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI, 3078 nullptr, Twine(Prefix) + "scalar.ph"); 3079 3080 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3081 3082 // Set up the middle block terminator. Two cases: 3083 // 1) If we know that we must execute the scalar epilogue, emit an 3084 // unconditional branch. 3085 // 2) Otherwise, we must have a single unique exit block (due to how we 3086 // implement the multiple exit case). In this case, set up a conditonal 3087 // branch from the middle block to the loop scalar preheader, and the 3088 // exit block. completeLoopSkeleton will update the condition to use an 3089 // iteration check, if required to decide whether to execute the remainder. 3090 BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ? 3091 BranchInst::Create(LoopScalarPreHeader) : 3092 BranchInst::Create(LoopExitBlock, LoopScalarPreHeader, 3093 Builder.getTrue()); 3094 BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3095 ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst); 3096 3097 // Update dominator for loop exit. During skeleton creation, only the vector 3098 // pre-header and the middle block are created. The vector loop is entirely 3099 // created during VPlan exection. 3100 if (!Cost->requiresScalarEpilogue(VF)) 3101 // If there is an epilogue which must run, there's no edge from the 3102 // middle block to exit blocks and thus no need to update the immediate 3103 // dominator of the exit blocks. 3104 DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); 3105 } 3106 3107 void InnerLoopVectorizer::createInductionResumeValues( 3108 std::pair<BasicBlock *, Value *> AdditionalBypass) { 3109 assert(((AdditionalBypass.first && AdditionalBypass.second) || 3110 (!AdditionalBypass.first && !AdditionalBypass.second)) && 3111 "Inconsistent information about additional bypass."); 3112 3113 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3114 assert(VectorTripCount && "Expected valid arguments"); 3115 // We are going to resume the execution of the scalar loop. 
3116 // Go over all of the induction variables that we found and fix the 3117 // PHIs that are left in the scalar version of the loop. 3118 // The starting values of PHI nodes depend on the counter of the last 3119 // iteration in the vectorized loop. 3120 // If we come from a bypass edge then we need to start from the original 3121 // start value. 3122 Instruction *OldInduction = Legal->getPrimaryInduction(); 3123 for (auto &InductionEntry : Legal->getInductionVars()) { 3124 PHINode *OrigPhi = InductionEntry.first; 3125 InductionDescriptor II = InductionEntry.second; 3126 3127 Value *&EndValue = IVEndValues[OrigPhi]; 3128 Value *EndValueFromAdditionalBypass = AdditionalBypass.second; 3129 if (OrigPhi == OldInduction) { 3130 // We know what the end value is. 3131 EndValue = VectorTripCount; 3132 } else { 3133 IRBuilder<> B(LoopVectorPreHeader->getTerminator()); 3134 3135 // Fast-math-flags propagate from the original induction instruction. 3136 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3137 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3138 3139 Type *StepType = II.getStep()->getType(); 3140 Instruction::CastOps CastOp = 3141 CastInst::getCastOpcode(VectorTripCount, true, StepType, true); 3142 Value *VTC = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.vtc"); 3143 Value *Step = 3144 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3145 EndValue = emitTransformedIndex(B, VTC, II.getStartValue(), Step, II); 3146 EndValue->setName("ind.end"); 3147 3148 // Compute the end value for the additional bypass (if applicable). 3149 if (AdditionalBypass.first) { 3150 B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt())); 3151 CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true, 3152 StepType, true); 3153 Value *Step = 3154 CreateStepValue(II.getStep(), *PSE.getSE(), &*B.GetInsertPoint()); 3155 VTC = 3156 B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.vtc"); 3157 EndValueFromAdditionalBypass = 3158 emitTransformedIndex(B, VTC, II.getStartValue(), Step, II); 3159 EndValueFromAdditionalBypass->setName("ind.end"); 3160 } 3161 } 3162 3163 // Create phi nodes to merge from the backedge-taken check block. 3164 PHINode *BCResumeVal = 3165 PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val", 3166 LoopScalarPreHeader->getTerminator()); 3167 // Copy original phi DL over to the new one. 3168 BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc()); 3169 3170 // The new PHI merges the original incoming value, in case of a bypass, 3171 // or the value at the end of the vectorized loop. 3172 BCResumeVal->addIncoming(EndValue, LoopMiddleBlock); 3173 3174 // Fix the scalar body counter (PHI node). 3175 // The old induction's phi node in the scalar body needs the truncated 3176 // value. 3177 for (BasicBlock *BB : LoopBypassBlocks) 3178 BCResumeVal->addIncoming(II.getStartValue(), BB); 3179 3180 if (AdditionalBypass.first) 3181 BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first, 3182 EndValueFromAdditionalBypass); 3183 3184 OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal); 3185 } 3186 } 3187 3188 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(MDNode *OrigLoopID) { 3189 // The trip counts should be cached by now. 
3190 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 3191 Value *VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 3192 3193 auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator(); 3194 3195 // Add a check in the middle block to see if we have completed 3196 // all of the iterations in the first vector loop. Three cases: 3197 // 1) If we require a scalar epilogue, there is no conditional branch as 3198 // we unconditionally branch to the scalar preheader. Do nothing. 3199 // 2) If (N - N%VF) == N, then we *don't* need to run the remainder. 3200 // Thus if tail is to be folded, we know we don't need to run the 3201 // remainder and we can use the previous value for the condition (true). 3202 // 3) Otherwise, construct a runtime check. 3203 if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) { 3204 Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, 3205 Count, VectorTripCount, "cmp.n", 3206 LoopMiddleBlock->getTerminator()); 3207 3208 // Here we use the same DebugLoc as the scalar loop latch terminator instead 3209 // of the corresponding compare because they may have ended up with 3210 // different line numbers and we want to avoid awkward line stepping while 3211 // debugging. Eg. if the compare has got a line number inside the loop. 3212 CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc()); 3213 cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN); 3214 } 3215 3216 #ifdef EXPENSIVE_CHECKS 3217 assert(DT->verify(DominatorTree::VerificationLevel::Fast)); 3218 #endif 3219 3220 return LoopVectorPreHeader; 3221 } 3222 3223 std::pair<BasicBlock *, Value *> 3224 InnerLoopVectorizer::createVectorizedLoopSkeleton() { 3225 /* 3226 In this function we generate a new loop. The new loop will contain 3227 the vectorized instructions while the old loop will continue to run the 3228 scalar remainder. 3229 3230 [ ] <-- loop iteration number check. 3231 / | 3232 / v 3233 | [ ] <-- vector loop bypass (may consist of multiple blocks). 3234 | / | 3235 | / v 3236 || [ ] <-- vector pre header. 3237 |/ | 3238 | v 3239 | [ ] \ 3240 | [ ]_| <-- vector loop (created during VPlan execution). 3241 | | 3242 | v 3243 \ -[ ] <--- middle-block. 3244 \/ | 3245 /\ v 3246 | ->[ ] <--- new preheader. 3247 | | 3248 (opt) v <-- edge from middle to exit iff epilogue is not required. 3249 | [ ] \ 3250 | [ ]_| <-- old scalar loop to handle remainder (scalar epilogue). 3251 \ | 3252 \ v 3253 >[ ] <-- exit block(s). 3254 ... 3255 */ 3256 3257 // Get the metadata of the original loop before it gets modified. 3258 MDNode *OrigLoopID = OrigLoop->getLoopID(); 3259 3260 // Workaround! Compute the trip count of the original loop and cache it 3261 // before we start modifying the CFG. This code has a systemic problem 3262 // wherein it tries to run analysis over partially constructed IR; this is 3263 // wrong, and not simply for SCEV. The trip count of the original loop 3264 // simply happens to be prone to hitting this in practice. In theory, we 3265 // can hit the same issue for any SCEV, or ValueTracking query done during 3266 // mutation. See PR49900. 3267 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 3268 3269 // Create an empty vector loop, and prepare basic blocks for the runtime 3270 // checks. 3271 createVectorLoopSkeleton(""); 3272 3273 // Now, compare the new count to zero. If it is zero skip the vector loop and 3274 // jump to the scalar loop. 
This check also covers the case where the 3275 // backedge-taken count is uint##_max: adding one to it will overflow leading 3276 // to an incorrect trip count of zero. In this (rare) case we will also jump 3277 // to the scalar loop. 3278 emitIterationCountCheck(LoopScalarPreHeader); 3279 3280 // Generate the code to check any assumptions that we've made for SCEV 3281 // expressions. 3282 emitSCEVChecks(LoopScalarPreHeader); 3283 3284 // Generate the code that checks in runtime if arrays overlap. We put the 3285 // checks into a separate block to make the more common case of few elements 3286 // faster. 3287 emitMemRuntimeChecks(LoopScalarPreHeader); 3288 3289 // Emit phis for the new starting index of the scalar loop. 3290 createInductionResumeValues(); 3291 3292 return {completeLoopSkeleton(OrigLoopID), nullptr}; 3293 } 3294 3295 // Fix up external users of the induction variable. At this point, we are 3296 // in LCSSA form, with all external PHIs that use the IV having one input value, 3297 // coming from the remainder loop. We need those PHIs to also have a correct 3298 // value for the IV when arriving directly from the middle block. 3299 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi, 3300 const InductionDescriptor &II, 3301 Value *VectorTripCount, Value *EndValue, 3302 BasicBlock *MiddleBlock, 3303 BasicBlock *VectorHeader, VPlan &Plan) { 3304 // There are two kinds of external IV usages - those that use the value 3305 // computed in the last iteration (the PHI) and those that use the penultimate 3306 // value (the value that feeds into the phi from the loop latch). 3307 // We allow both, but they, obviously, have different values. 3308 3309 assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block"); 3310 3311 DenseMap<Value *, Value *> MissingVals; 3312 3313 // An external user of the last iteration's value should see the value that 3314 // the remainder loop uses to initialize its own IV. 3315 Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()); 3316 for (User *U : PostInc->users()) { 3317 Instruction *UI = cast<Instruction>(U); 3318 if (!OrigLoop->contains(UI)) { 3319 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3320 MissingVals[UI] = EndValue; 3321 } 3322 } 3323 3324 // An external user of the penultimate value need to see EndValue - Step. 3325 // The simplest way to get this is to recompute it from the constituent SCEVs, 3326 // that is Start + (Step * (CRD - 1)). 3327 for (User *U : OrigPhi->users()) { 3328 auto *UI = cast<Instruction>(U); 3329 if (!OrigLoop->contains(UI)) { 3330 assert(isa<PHINode>(UI) && "Expected LCSSA form"); 3331 3332 IRBuilder<> B(MiddleBlock->getTerminator()); 3333 3334 // Fast-math-flags propagate from the original induction instruction. 3335 if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp())) 3336 B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags()); 3337 3338 Value *CountMinusOne = B.CreateSub( 3339 VectorTripCount, ConstantInt::get(VectorTripCount->getType(), 1)); 3340 Value *CMO = 3341 !II.getStep()->getType()->isIntegerTy() 3342 ? 
B.CreateCast(Instruction::SIToFP, CountMinusOne, 3343 II.getStep()->getType()) 3344 : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType()); 3345 CMO->setName("cast.cmo"); 3346 3347 Value *Step = CreateStepValue(II.getStep(), *PSE.getSE(), 3348 VectorHeader->getTerminator()); 3349 Value *Escape = 3350 emitTransformedIndex(B, CMO, II.getStartValue(), Step, II); 3351 Escape->setName("ind.escape"); 3352 MissingVals[UI] = Escape; 3353 } 3354 } 3355 3356 for (auto &I : MissingVals) { 3357 PHINode *PHI = cast<PHINode>(I.first); 3358 // One corner case we have to handle is two IVs "chasing" each-other, 3359 // that is %IV2 = phi [...], [ %IV1, %latch ] 3360 // In this case, if IV1 has an external use, we need to avoid adding both 3361 // "last value of IV1" and "penultimate value of IV2". So, verify that we 3362 // don't already have an incoming value for the middle block. 3363 if (PHI->getBasicBlockIndex(MiddleBlock) == -1) { 3364 PHI->addIncoming(I.second, MiddleBlock); 3365 Plan.removeLiveOut(PHI); 3366 } 3367 } 3368 } 3369 3370 namespace { 3371 3372 struct CSEDenseMapInfo { 3373 static bool canHandle(const Instruction *I) { 3374 return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || 3375 isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I); 3376 } 3377 3378 static inline Instruction *getEmptyKey() { 3379 return DenseMapInfo<Instruction *>::getEmptyKey(); 3380 } 3381 3382 static inline Instruction *getTombstoneKey() { 3383 return DenseMapInfo<Instruction *>::getTombstoneKey(); 3384 } 3385 3386 static unsigned getHashValue(const Instruction *I) { 3387 assert(canHandle(I) && "Unknown instruction!"); 3388 return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(), 3389 I->value_op_end())); 3390 } 3391 3392 static bool isEqual(const Instruction *LHS, const Instruction *RHS) { 3393 if (LHS == getEmptyKey() || RHS == getEmptyKey() || 3394 LHS == getTombstoneKey() || RHS == getTombstoneKey()) 3395 return LHS == RHS; 3396 return LHS->isIdenticalTo(RHS); 3397 } 3398 }; 3399 3400 } // end anonymous namespace 3401 3402 ///Perform cse of induction variable instructions. 3403 static void cse(BasicBlock *BB) { 3404 // Perform simple cse. 3405 SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap; 3406 for (Instruction &In : llvm::make_early_inc_range(*BB)) { 3407 if (!CSEDenseMapInfo::canHandle(&In)) 3408 continue; 3409 3410 // Check if we can replace this instruction with any of the 3411 // visited instructions. 3412 if (Instruction *V = CSEMap.lookup(&In)) { 3413 In.replaceAllUsesWith(V); 3414 In.eraseFromParent(); 3415 continue; 3416 } 3417 3418 CSEMap[&In] = &In; 3419 } 3420 } 3421 3422 InstructionCost 3423 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF, 3424 bool &NeedToScalarize) const { 3425 Function *F = CI->getCalledFunction(); 3426 Type *ScalarRetTy = CI->getType(); 3427 SmallVector<Type *, 4> Tys, ScalarTys; 3428 for (auto &ArgOp : CI->args()) 3429 ScalarTys.push_back(ArgOp->getType()); 3430 3431 // Estimate cost of scalarized vector call. The source operands are assumed 3432 // to be vectors, so we need to extract individual elements from there, 3433 // execute VF scalar calls, and then gather the result into the vector return 3434 // value. 3435 InstructionCost ScalarCallCost = 3436 TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput); 3437 if (VF.isScalar()) 3438 return ScalarCallCost; 3439 3440 // Compute corresponding vector type for return value and arguments. 
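// (Illustrative, made-up numbers: with VF = 4, a scalar call cost of 10 and a
// scalarization overhead of 6, the scalarized estimate computed below is
// 4 * 10 + 6 = 46; a vector library variant costing, say, 20 would then win
// the comparison at the end of this function and clear NeedToScalarize.)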
3441 Type *RetTy = ToVectorTy(ScalarRetTy, VF); 3442 for (Type *ScalarTy : ScalarTys) 3443 Tys.push_back(ToVectorTy(ScalarTy, VF)); 3444 3445 // Compute costs of unpacking argument values for the scalar calls and 3446 // packing the return values to a vector. 3447 InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF); 3448 3449 InstructionCost Cost = 3450 ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost; 3451 3452 // If we can't emit a vector call for this function, then the currently found 3453 // cost is the cost we need to return. 3454 NeedToScalarize = true; 3455 VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/); 3456 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape); 3457 3458 if (!TLI || CI->isNoBuiltin() || !VecFunc) 3459 return Cost; 3460 3461 // If the corresponding vector cost is cheaper, return its cost. 3462 InstructionCost VectorCallCost = 3463 TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput); 3464 if (VectorCallCost < Cost) { 3465 NeedToScalarize = false; 3466 Cost = VectorCallCost; 3467 } 3468 return Cost; 3469 } 3470 3471 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) { 3472 if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy())) 3473 return Elt; 3474 return VectorType::get(Elt, VF); 3475 } 3476 3477 InstructionCost 3478 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI, 3479 ElementCount VF) const { 3480 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 3481 assert(ID && "Expected intrinsic call!"); 3482 Type *RetTy = MaybeVectorizeType(CI->getType(), VF); 3483 FastMathFlags FMF; 3484 if (auto *FPMO = dyn_cast<FPMathOperator>(CI)) 3485 FMF = FPMO->getFastMathFlags(); 3486 3487 SmallVector<const Value *> Arguments(CI->args()); 3488 FunctionType *FTy = CI->getCalledFunction()->getFunctionType(); 3489 SmallVector<Type *> ParamTys; 3490 std::transform(FTy->param_begin(), FTy->param_end(), 3491 std::back_inserter(ParamTys), 3492 [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); }); 3493 3494 IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF, 3495 dyn_cast<IntrinsicInst>(CI)); 3496 return TTI.getIntrinsicInstrCost(CostAttrs, 3497 TargetTransformInfo::TCK_RecipThroughput); 3498 } 3499 3500 static Type *smallestIntegerVectorType(Type *T1, Type *T2) { 3501 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3502 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3503 return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2; 3504 } 3505 3506 static Type *largestIntegerVectorType(Type *T1, Type *T2) { 3507 auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType()); 3508 auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType()); 3509 return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2; 3510 } 3511 3512 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) { 3513 // For every instruction `I` in MinBWs, truncate the operands, create a 3514 // truncated version of `I` and reextend its result. InstCombine runs 3515 // later and will remove any ext/trunc pairs. 3516 SmallPtrSet<Value *, 4> Erased; 3517 for (const auto &KV : Cost->getMinimalBitwidths()) { 3518 // If the value wasn't vectorized, we must maintain the original scalar 3519 // type. The absence of the value from State indicates that it 3520 // wasn't vectorized. 3521 // FIXME: Should not rely on getVPValue at this point. 
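// (Shorthand of the rewrite performed below, assuming a value that is known to
// need only 8 bits and VF = 4:
//   %t0 = trunc <4 x i32> %op0 to <4 x i8>
//   %t1 = trunc <4 x i32> %op1 to <4 x i8>
//   %n  = add <4 x i8> %t0, %t1
//   %r  = zext <4 x i8> %n to <4 x i32>
// InstCombine later removes any ext/trunc pairs that cancel out.)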
3522 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3523 if (!State.hasAnyVectorValue(Def)) 3524 continue; 3525 for (unsigned Part = 0; Part < UF; ++Part) { 3526 Value *I = State.get(Def, Part); 3527 if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I)) 3528 continue; 3529 Type *OriginalTy = I->getType(); 3530 Type *ScalarTruncatedTy = 3531 IntegerType::get(OriginalTy->getContext(), KV.second); 3532 auto *TruncatedTy = VectorType::get( 3533 ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount()); 3534 if (TruncatedTy == OriginalTy) 3535 continue; 3536 3537 IRBuilder<> B(cast<Instruction>(I)); 3538 auto ShrinkOperand = [&](Value *V) -> Value * { 3539 if (auto *ZI = dyn_cast<ZExtInst>(V)) 3540 if (ZI->getSrcTy() == TruncatedTy) 3541 return ZI->getOperand(0); 3542 return B.CreateZExtOrTrunc(V, TruncatedTy); 3543 }; 3544 3545 // The actual instruction modification depends on the instruction type, 3546 // unfortunately. 3547 Value *NewI = nullptr; 3548 if (auto *BO = dyn_cast<BinaryOperator>(I)) { 3549 NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)), 3550 ShrinkOperand(BO->getOperand(1))); 3551 3552 // Any wrapping introduced by shrinking this operation shouldn't be 3553 // considered undefined behavior. So, we can't unconditionally copy 3554 // arithmetic wrapping flags to NewI. 3555 cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false); 3556 } else if (auto *CI = dyn_cast<ICmpInst>(I)) { 3557 NewI = 3558 B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)), 3559 ShrinkOperand(CI->getOperand(1))); 3560 } else if (auto *SI = dyn_cast<SelectInst>(I)) { 3561 NewI = B.CreateSelect(SI->getCondition(), 3562 ShrinkOperand(SI->getTrueValue()), 3563 ShrinkOperand(SI->getFalseValue())); 3564 } else if (auto *CI = dyn_cast<CastInst>(I)) { 3565 switch (CI->getOpcode()) { 3566 default: 3567 llvm_unreachable("Unhandled cast!"); 3568 case Instruction::Trunc: 3569 NewI = ShrinkOperand(CI->getOperand(0)); 3570 break; 3571 case Instruction::SExt: 3572 NewI = B.CreateSExtOrTrunc( 3573 CI->getOperand(0), 3574 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3575 break; 3576 case Instruction::ZExt: 3577 NewI = B.CreateZExtOrTrunc( 3578 CI->getOperand(0), 3579 smallestIntegerVectorType(OriginalTy, TruncatedTy)); 3580 break; 3581 } 3582 } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) { 3583 auto Elements0 = 3584 cast<VectorType>(SI->getOperand(0)->getType())->getElementCount(); 3585 auto *O0 = B.CreateZExtOrTrunc( 3586 SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0)); 3587 auto Elements1 = 3588 cast<VectorType>(SI->getOperand(1)->getType())->getElementCount(); 3589 auto *O1 = B.CreateZExtOrTrunc( 3590 SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1)); 3591 3592 NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask()); 3593 } else if (isa<LoadInst>(I) || isa<PHINode>(I)) { 3594 // Don't do anything with the operands, just extend the result. 
3595 continue; 3596 } else if (auto *IE = dyn_cast<InsertElementInst>(I)) { 3597 auto Elements = 3598 cast<VectorType>(IE->getOperand(0)->getType())->getElementCount(); 3599 auto *O0 = B.CreateZExtOrTrunc( 3600 IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3601 auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy); 3602 NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2)); 3603 } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) { 3604 auto Elements = 3605 cast<VectorType>(EE->getOperand(0)->getType())->getElementCount(); 3606 auto *O0 = B.CreateZExtOrTrunc( 3607 EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements)); 3608 NewI = B.CreateExtractElement(O0, EE->getOperand(2)); 3609 } else { 3610 // If we don't know what to do, be conservative and don't do anything. 3611 continue; 3612 } 3613 3614 // Lastly, extend the result. 3615 NewI->takeName(cast<Instruction>(I)); 3616 Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy); 3617 I->replaceAllUsesWith(Res); 3618 cast<Instruction>(I)->eraseFromParent(); 3619 Erased.insert(I); 3620 State.reset(Def, Res, Part); 3621 } 3622 } 3623 3624 // We'll have created a bunch of ZExts that are now parentless. Clean up. 3625 for (const auto &KV : Cost->getMinimalBitwidths()) { 3626 // If the value wasn't vectorized, we must maintain the original scalar 3627 // type. The absence of the value from State indicates that it 3628 // wasn't vectorized. 3629 // FIXME: Should not rely on getVPValue at this point. 3630 VPValue *Def = State.Plan->getVPValue(KV.first, true); 3631 if (!State.hasAnyVectorValue(Def)) 3632 continue; 3633 for (unsigned Part = 0; Part < UF; ++Part) { 3634 Value *I = State.get(Def, Part); 3635 ZExtInst *Inst = dyn_cast<ZExtInst>(I); 3636 if (Inst && Inst->use_empty()) { 3637 Value *NewI = Inst->getOperand(0); 3638 Inst->eraseFromParent(); 3639 State.reset(Def, NewI, Part); 3640 } 3641 } 3642 } 3643 } 3644 3645 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State, 3646 VPlan &Plan) { 3647 // Insert truncates and extends for any truncated instructions as hints to 3648 // InstCombine. 3649 if (VF.isVector()) 3650 truncateToMinimalBitwidths(State); 3651 3652 // Fix widened non-induction PHIs by setting up the PHI operands. 3653 if (EnableVPlanNativePath) 3654 fixNonInductionPHIs(Plan, State); 3655 3656 // At this point every instruction in the original loop is widened to a 3657 // vector form. Now we need to fix the recurrences in the loop. These PHI 3658 // nodes are currently empty because we did not want to introduce cycles. 3659 // This is the second stage of vectorizing recurrences. 3660 fixCrossIterationPHIs(State); 3661 3662 // Forget the original basic block. 3663 PSE.getSE()->forgetLoop(OrigLoop); 3664 3665 VPBasicBlock *LatchVPBB = Plan.getVectorLoopRegion()->getExitingBasicBlock(); 3666 Loop *VectorLoop = LI->getLoopFor(State.CFG.VPBB2IRBB[LatchVPBB]); 3667 if (Cost->requiresScalarEpilogue(VF)) { 3668 // No edge from the middle block to the unique exit block has been inserted 3669 // and there is nothing to fix from vector loop; phis should have incoming 3670 // from scalar loop only. 3671 Plan.clearLiveOuts(); 3672 } else { 3673 // If we inserted an edge from the middle block to the unique exit block, 3674 // update uses outside the loop (phis) to account for the newly inserted 3675 // edge. 3676 3677 // Fix-up external users of the induction variables. 
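// (For instance, given 'for (i = 0; i < n; ++i) {...} use(i);', the LCSSA phi
// for i in the exit block initially only has an incoming value from the scalar
// loop; when we exit via the middle block instead, fixupIVUsers gives that phi
// a second incoming value computed from the vector trip count.)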
3678 for (auto &Entry : Legal->getInductionVars()) 3679 fixupIVUsers(Entry.first, Entry.second, 3680 getOrCreateVectorTripCount(VectorLoop->getLoopPreheader()), 3681 IVEndValues[Entry.first], LoopMiddleBlock, 3682 VectorLoop->getHeader(), Plan); 3683 } 3684 3685 // Fix LCSSA phis not already fixed earlier. Extracts may need to be generated 3686 // in the exit block, so update the builder. 3687 State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHI()); 3688 for (auto &KV : Plan.getLiveOuts()) 3689 KV.second->fixPhi(Plan, State); 3690 3691 for (Instruction *PI : PredicatedInstructions) 3692 sinkScalarOperands(&*PI); 3693 3694 // Remove redundant induction instructions. 3695 cse(VectorLoop->getHeader()); 3696 3697 // Set/update profile weights for the vector and remainder loops as original 3698 // loop iterations are now distributed among them. Note that the original loop, 3699 // represented by LoopScalarBody, becomes the remainder loop after vectorization. 3700 // 3701 // For cases like foldTailByMasking() and requiresScalarEpilogue() we may 3702 // end up with a slightly less accurate result, but that should be OK since the 3703 // profile is not inherently precise anyway. Note also that a possible bypass of 3704 // vector code caused by legality checks is ignored, optimistically assigning all 3705 // the weight to the vector loop. 3706 // 3707 // For scalable vectorization we can't know at compile time how many iterations 3708 // of the loop are handled in one vector iteration, so instead assume a pessimistic 3709 // vscale of '1'. 3710 setProfileInfoAfterUnrolling(LI->getLoopFor(LoopScalarBody), VectorLoop, 3711 LI->getLoopFor(LoopScalarBody), 3712 VF.getKnownMinValue() * UF); 3713 } 3714 3715 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) { 3716 // In order to support recurrences we need to be able to vectorize Phi nodes. 3717 // Phi nodes have cycles, so we need to vectorize them in two stages. This is 3718 // stage #2: We now need to fix the recurrences by adding incoming edges to 3719 // the currently empty PHI nodes. At this point every instruction in the 3720 // original loop is widened to a vector form so we can use them to construct 3721 // the incoming edges. 3722 VPBasicBlock *Header = 3723 State.Plan->getVectorLoopRegion()->getEntryBasicBlock(); 3724 for (VPRecipeBase &R : Header->phis()) { 3725 if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) 3726 fixReduction(ReductionPhi, State); 3727 else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R)) 3728 fixFirstOrderRecurrence(FOR, State); 3729 } 3730 } 3731 3732 void InnerLoopVectorizer::fixFirstOrderRecurrence( 3733 VPFirstOrderRecurrencePHIRecipe *PhiR, VPTransformState &State) { 3734 // This is the second phase of vectorizing first-order recurrences. An 3735 // overview of the transformation is described below. Suppose we have the 3736 // following loop. 3737 // 3738 // for (int i = 0; i < n; ++i) 3739 // b[i] = a[i] - a[i - 1]; 3740 // 3741 // There is a first-order recurrence on "a". For this loop, the shorthand 3742 // scalar IR looks like: 3743 // 3744 // scalar.ph: 3745 // s_init = a[-1] 3746 // br scalar.body 3747 // 3748 // scalar.body: 3749 // i = phi [0, scalar.ph], [i+1, scalar.body] 3750 // s1 = phi [s_init, scalar.ph], [s2, scalar.body] 3751 // s2 = a[i] 3752 // b[i] = s2 - s1 3753 // br cond, scalar.body, ... 3754 // 3755 // In this example, s1 is a recurrence because its value depends on the 3756 // previous iteration.
In the first phase of vectorization, we created a 3757 // vector phi v1 for s1. We now complete the vectorization and produce the 3758 // shorthand vector IR shown below (for VF = 4, UF = 1). 3759 // 3760 // vector.ph: 3761 // v_init = vector(..., ..., ..., a[-1]) 3762 // br vector.body 3763 // 3764 // vector.body 3765 // i = phi [0, vector.ph], [i+4, vector.body] 3766 // v1 = phi [v_init, vector.ph], [v2, vector.body] 3767 // v2 = a[i, i+1, i+2, i+3]; 3768 // v3 = vector(v1(3), v2(0, 1, 2)) 3769 // b[i, i+1, i+2, i+3] = v2 - v3 3770 // br cond, vector.body, middle.block 3771 // 3772 // middle.block: 3773 // x = v2(3) 3774 // br scalar.ph 3775 // 3776 // scalar.ph: 3777 // s_init = phi [x, middle.block], [a[-1], otherwise] 3778 // br scalar.body 3779 // 3780 // After execution completes the vector loop, we extract the next value of 3781 // the recurrence (x) to use as the initial value in the scalar loop. 3782 3783 // Extract the last vector element in the middle block. This will be the 3784 // initial value for the recurrence when jumping to the scalar loop. 3785 VPValue *PreviousDef = PhiR->getBackedgeValue(); 3786 Value *Incoming = State.get(PreviousDef, UF - 1); 3787 auto *ExtractForScalar = Incoming; 3788 auto *IdxTy = Builder.getInt32Ty(); 3789 if (VF.isVector()) { 3790 auto *One = ConstantInt::get(IdxTy, 1); 3791 Builder.SetInsertPoint(LoopMiddleBlock->getTerminator()); 3792 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3793 auto *LastIdx = Builder.CreateSub(RuntimeVF, One); 3794 ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx, 3795 "vector.recur.extract"); 3796 } 3797 // Extract the second last element in the middle block if the 3798 // Phi is used outside the loop. We need to extract the phi itself 3799 // and not the last element (the phi update in the current iteration). This 3800 // will be the value when jumping to the exit block from the LoopMiddleBlock, 3801 // when the scalar loop is not run at all. 3802 Value *ExtractForPhiUsedOutsideLoop = nullptr; 3803 if (VF.isVector()) { 3804 auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF); 3805 auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2)); 3806 ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement( 3807 Incoming, Idx, "vector.recur.extract.for.phi"); 3808 } else if (UF > 1) 3809 // When loop is unrolled without vectorizing, initialize 3810 // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value 3811 // of `Incoming`. This is analogous to the vectorized case above: extracting 3812 // the second last element when VF > 1. 3813 ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2); 3814 3815 // Fix the initial value of the original recurrence in the scalar loop. 3816 Builder.SetInsertPoint(&*LoopScalarPreHeader->begin()); 3817 PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue()); 3818 auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init"); 3819 auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue(); 3820 for (auto *BB : predecessors(LoopScalarPreHeader)) { 3821 auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit; 3822 Start->addIncoming(Incoming, BB); 3823 } 3824 3825 Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start); 3826 Phi->setName("scalar.recur"); 3827 3828 // Finally, fix users of the recurrence outside the loop. The users will need 3829 // either the last value of the scalar recurrence or the last value of the 3830 // vector recurrence we extracted in the middle block. 
Since the loop is in 3831 // LCSSA form, we just need to find all the phi nodes for the original scalar 3832 // recurrence in the exit block, and then add an edge for the middle block. 3833 // Note that LCSSA does not imply single entry when the original scalar loop 3834 // had multiple exiting edges (as we always run the last iteration in the 3835 // scalar epilogue); in that case, there is no edge from middle to exit 3836 // and thus no phis which need to be updated. 3837 if (!Cost->requiresScalarEpilogue(VF)) 3838 for (PHINode &LCSSAPhi : LoopExitBlock->phis()) 3839 if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi)) { 3840 LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock); 3841 State.Plan->removeLiveOut(&LCSSAPhi); 3842 } 3843 } 3844 3845 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR, 3846 VPTransformState &State) { 3847 PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue()); 3848 // Get its reduction variable descriptor. 3849 assert(Legal->isReductionVariable(OrigPhi) && 3850 "Unable to find the reduction variable"); 3851 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor(); 3852 3853 RecurKind RK = RdxDesc.getRecurrenceKind(); 3854 TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue(); 3855 Instruction *LoopExitInst = RdxDesc.getLoopExitInstr(); 3856 State.setDebugLocFromInst(ReductionStartValue); 3857 3858 VPValue *LoopExitInstDef = PhiR->getBackedgeValue(); 3859 // This is the vector-clone of the value that leaves the loop. 3860 Type *VecTy = State.get(LoopExitInstDef, 0)->getType(); 3861 3862 // Wrap flags are in general invalid after vectorization, clear them. 3863 clearReductionWrapFlags(PhiR, State); 3864 3865 // Before each round, move the insertion point right between 3866 // the PHIs and the values we are going to write. 3867 // This allows us to write both PHINodes and the extractelement 3868 // instructions. 3869 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3870 3871 State.setDebugLocFromInst(LoopExitInst); 3872 3873 Type *PhiTy = OrigPhi->getType(); 3874 3875 VPBasicBlock *LatchVPBB = 3876 PhiR->getParent()->getEnclosingLoopRegion()->getExitingBasicBlock(); 3877 BasicBlock *VectorLoopLatch = State.CFG.VPBB2IRBB[LatchVPBB]; 3878 // If tail is folded by masking, the vector value to leave the loop should be 3879 // a Select choosing between the vectorized LoopExitInst and vectorized Phi, 3880 // instead of the former. For an inloop reduction the reduction will already 3881 // be predicated, and does not need to be handled here.
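// Shorthand of the pattern matched below (VF = 4, add reduction, made-up
// names):
//   %rdx.next = add <4 x i32> %rdx.phi, %val
//   %sel = select <4 x i1> %mask, <4 x i32> %rdx.next, <4 x i32> %rdx.phi
// The select, not %rdx.next, is the value that must be treated as leaving the
// loop.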
3882 if (Cost->foldTailByMasking() && !PhiR->isInLoop()) { 3883 for (unsigned Part = 0; Part < UF; ++Part) { 3884 Value *VecLoopExitInst = State.get(LoopExitInstDef, Part); 3885 SelectInst *Sel = nullptr; 3886 for (User *U : VecLoopExitInst->users()) { 3887 if (isa<SelectInst>(U)) { 3888 assert(!Sel && "Reduction exit feeding two selects"); 3889 Sel = cast<SelectInst>(U); 3890 } else 3891 assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select"); 3892 } 3893 assert(Sel && "Reduction exit feeds no select"); 3894 State.reset(LoopExitInstDef, Sel, Part); 3895 3896 if (isa<FPMathOperator>(Sel)) 3897 Sel->setFastMathFlags(RdxDesc.getFastMathFlags()); 3898 3899 // If the target can create a predicated operator for the reduction at no 3900 // extra cost in the loop (for example a predicated vadd), it can be 3901 // cheaper for the select to remain in the loop than be sunk out of it, 3902 // and so use the select value for the phi instead of the old 3903 // LoopExitValue. 3904 if (PreferPredicatedReductionSelect || 3905 TTI->preferPredicatedReductionSelect( 3906 RdxDesc.getOpcode(), PhiTy, 3907 TargetTransformInfo::ReductionFlags())) { 3908 auto *VecRdxPhi = 3909 cast<PHINode>(State.get(PhiR, Part)); 3910 VecRdxPhi->setIncomingValueForBlock(VectorLoopLatch, Sel); 3911 } 3912 } 3913 } 3914 3915 // If the vector reduction can be performed in a smaller type, we truncate 3916 // then extend the loop exit value to enable InstCombine to evaluate the 3917 // entire expression in the smaller type. 3918 if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) { 3919 assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!"); 3920 Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF); 3921 Builder.SetInsertPoint(VectorLoopLatch->getTerminator()); 3922 VectorParts RdxParts(UF); 3923 for (unsigned Part = 0; Part < UF; ++Part) { 3924 RdxParts[Part] = State.get(LoopExitInstDef, Part); 3925 Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3926 Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy) 3927 : Builder.CreateZExt(Trunc, VecTy); 3928 for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users())) 3929 if (U != Trunc) { 3930 U->replaceUsesOfWith(RdxParts[Part], Extnd); 3931 RdxParts[Part] = Extnd; 3932 } 3933 } 3934 Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt()); 3935 for (unsigned Part = 0; Part < UF; ++Part) { 3936 RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy); 3937 State.reset(LoopExitInstDef, RdxParts[Part], Part); 3938 } 3939 } 3940 3941 // Reduce all of the unrolled parts into a single vector. 3942 Value *ReducedPartRdx = State.get(LoopExitInstDef, 0); 3943 unsigned Op = RecurrenceDescriptor::getOpcode(RK); 3944 3945 // The middle block terminator has already been assigned a DebugLoc here (the 3946 // OrigLoop's single latch terminator). We want the whole middle block to 3947 // appear to execute on this line because: (a) it is all compiler generated, 3948 // (b) these instructions are always executed after evaluating the latch 3949 // conditional branch, and (c) other passes may add new predecessors which 3950 // terminate on this line. This is the easiest way to ensure we don't 3951 // accidentally cause an extra step back into the loop while debugging. 3952 State.setDebugLocFromInst(LoopMiddleBlock->getTerminator()); 3953 if (PhiR->isOrdered()) 3954 ReducedPartRdx = State.get(LoopExitInstDef, UF - 1); 3955 else { 3956 // Floating-point operations should have some FMF to enable the reduction. 
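// (E.g. with UF = 2 and an fadd reduction, the loop below first combines the
// unrolled parts, roughly '%bin.rdx = fadd fast <4 x float> %part0, %part1',
// before createTargetReduction performs the final horizontal reduction further
// down.)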
3957 IRBuilderBase::FastMathFlagGuard FMFG(Builder); 3958 Builder.setFastMathFlags(RdxDesc.getFastMathFlags()); 3959 for (unsigned Part = 1; Part < UF; ++Part) { 3960 Value *RdxPart = State.get(LoopExitInstDef, Part); 3961 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 3962 ReducedPartRdx = Builder.CreateBinOp( 3963 (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx"); 3964 } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 3965 ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK, 3966 ReducedPartRdx, RdxPart); 3967 else 3968 ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart); 3969 } 3970 } 3971 3972 // Create the reduction after the loop. Note that inloop reductions create the 3973 // target reduction in the loop using a Reduction recipe. 3974 if (VF.isVector() && !PhiR->isInLoop()) { 3975 ReducedPartRdx = 3976 createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi); 3977 // If the reduction can be performed in a smaller type, we need to extend 3978 // the reduction to the wider type before we branch to the original loop. 3979 if (PhiTy != RdxDesc.getRecurrenceType()) 3980 ReducedPartRdx = RdxDesc.isSigned() 3981 ? Builder.CreateSExt(ReducedPartRdx, PhiTy) 3982 : Builder.CreateZExt(ReducedPartRdx, PhiTy); 3983 } 3984 3985 PHINode *ResumePhi = 3986 dyn_cast<PHINode>(PhiR->getStartValue()->getUnderlyingValue()); 3987 3988 // Create a phi node that merges control-flow from the backedge-taken check 3989 // block and the middle block. 3990 PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx", 3991 LoopScalarPreHeader->getTerminator()); 3992 3993 // If we are fixing reductions in the epilogue loop then we should already 3994 // have created a bc.merge.rdx Phi after the main vector body. Ensure that 3995 // we carry over the incoming values correctly. 3996 for (auto *Incoming : predecessors(LoopScalarPreHeader)) { 3997 if (Incoming == LoopMiddleBlock) 3998 BCBlockPhi->addIncoming(ReducedPartRdx, Incoming); 3999 else if (ResumePhi && llvm::is_contained(ResumePhi->blocks(), Incoming)) 4000 BCBlockPhi->addIncoming(ResumePhi->getIncomingValueForBlock(Incoming), 4001 Incoming); 4002 else 4003 BCBlockPhi->addIncoming(ReductionStartValue, Incoming); 4004 } 4005 4006 // Set the resume value for this reduction 4007 ReductionResumeValues.insert({&RdxDesc, BCBlockPhi}); 4008 4009 // If there were stores of the reduction value to a uniform memory address 4010 // inside the loop, create the final store here. 4011 if (StoreInst *SI = RdxDesc.IntermediateStore) { 4012 StoreInst *NewSI = 4013 Builder.CreateStore(ReducedPartRdx, SI->getPointerOperand()); 4014 propagateMetadata(NewSI, SI); 4015 4016 // If the reduction value is used in other places, 4017 // then let the code below create PHI's for that. 4018 } 4019 4020 // Now, we need to fix the users of the reduction variable 4021 // inside and outside of the scalar remainder loop. 4022 4023 // We know that the loop is in LCSSA form. We need to update the PHI nodes 4024 // in the exit blocks. See comment on analogous loop in 4025 // fixFirstOrderRecurrence for a more complete explaination of the logic. 
4026 if (!Cost->requiresScalarEpilogue(VF))
4027 for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4028 if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst)) {
4029 LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4030 State.Plan->removeLiveOut(&LCSSAPhi);
4031 }
4032
4033 // Fix the scalar loop reduction variable with the incoming reduction sum
4034 // from the vector body and from the backedge value.
4035 int IncomingEdgeBlockIdx =
4036 OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4037 assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4038 // Pick the other block.
4039 int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4040 OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4041 OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4042 }
4043
4044 void InnerLoopVectorizer::clearReductionWrapFlags(VPReductionPHIRecipe *PhiR,
4045 VPTransformState &State) {
4046 const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4047 RecurKind RK = RdxDesc.getRecurrenceKind();
4048 if (RK != RecurKind::Add && RK != RecurKind::Mul)
4049 return;
4050
4051 SmallVector<VPValue *, 8> Worklist;
4052 SmallPtrSet<VPValue *, 8> Visited;
4053 Worklist.push_back(PhiR);
4054 Visited.insert(PhiR);
4055
4056 while (!Worklist.empty()) {
4057 VPValue *Cur = Worklist.pop_back_val();
4058 for (unsigned Part = 0; Part < UF; ++Part) {
4059 Value *V = State.get(Cur, Part);
4060 if (!isa<OverflowingBinaryOperator>(V))
4061 break;
4062 cast<Instruction>(V)->dropPoisonGeneratingFlags();
4063 }
4064
4065 for (VPUser *U : Cur->users()) {
4066 auto *UserRecipe = dyn_cast<VPRecipeBase>(U);
4067 if (!UserRecipe)
4068 continue;
4069 for (VPValue *V : UserRecipe->definedValues())
4070 if (Visited.insert(V).second)
4071 Worklist.push_back(V);
4072 }
4073 }
4074 }
4075
4076 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4077 // The basic block and loop containing the predicated instruction.
4078 auto *PredBB = PredInst->getParent();
4079 auto *VectorLoop = LI->getLoopFor(PredBB);
4080
4081 // Initialize a worklist with the operands of the predicated instruction.
4082 SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4083
4084 // Holds instructions that we need to analyze again. An instruction may be
4085 // reanalyzed if we don't yet know if we can sink it or not.
4086 SmallVector<Instruction *, 8> InstsToReanalyze;
4087
4088 // Returns true if a given use occurs in the predicated block. Phi nodes use
4089 // their operands in their corresponding predecessor blocks.
4090 auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4091 auto *I = cast<Instruction>(U.getUser());
4092 BasicBlock *BB = I->getParent();
4093 if (auto *Phi = dyn_cast<PHINode>(I))
4094 BB = Phi->getIncomingBlock(
4095 PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4096 return BB == PredBB;
4097 };
4098
4099 // Iteratively sink the scalarized operands of the predicated instruction
4100 // into the block we created for it. When an instruction is sunk, its
4101 // operands are then added to the worklist. The algorithm ends when one pass
4102 // through the worklist doesn't sink a single instruction.
4103 bool Changed;
4104 do {
4105 // Add the instructions that need to be reanalyzed to the worklist, and
4106 // reset the changed indicator.
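// (Illustrative note: sinking a GEP that feeds only the predicated
// instruction may, on a later pass, make the GEP's own index computation
// sinkable as well, which is why the surrounding do-while iterates to a
// fixed point.)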
4107 Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end()); 4108 InstsToReanalyze.clear(); 4109 Changed = false; 4110 4111 while (!Worklist.empty()) { 4112 auto *I = dyn_cast<Instruction>(Worklist.pop_back_val()); 4113 4114 // We can't sink an instruction if it is a phi node, is not in the loop, 4115 // or may have side effects. 4116 if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) || 4117 I->mayHaveSideEffects()) 4118 continue; 4119 4120 // If the instruction is already in PredBB, check if we can sink its 4121 // operands. In that case, VPlan's sinkScalarOperands() succeeded in 4122 // sinking the scalar instruction I, hence it appears in PredBB; but it 4123 // may have failed to sink I's operands (recursively), which we try 4124 // (again) here. 4125 if (I->getParent() == PredBB) { 4126 Worklist.insert(I->op_begin(), I->op_end()); 4127 continue; 4128 } 4129 4130 // It's legal to sink the instruction if all its uses occur in the 4131 // predicated block. Otherwise, there's nothing to do yet, and we may 4132 // need to reanalyze the instruction. 4133 if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) { 4134 InstsToReanalyze.push_back(I); 4135 continue; 4136 } 4137 4138 // Move the instruction to the beginning of the predicated block, and add 4139 // it's operands to the worklist. 4140 I->moveBefore(&*PredBB->getFirstInsertionPt()); 4141 Worklist.insert(I->op_begin(), I->op_end()); 4142 4143 // The sinking may have enabled other instructions to be sunk, so we will 4144 // need to iterate. 4145 Changed = true; 4146 } 4147 } while (Changed); 4148 } 4149 4150 void InnerLoopVectorizer::fixNonInductionPHIs(VPlan &Plan, 4151 VPTransformState &State) { 4152 auto Iter = depth_first( 4153 VPBlockRecursiveTraversalWrapper<VPBlockBase *>(Plan.getEntry())); 4154 for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) { 4155 for (VPRecipeBase &P : VPBB->phis()) { 4156 VPWidenPHIRecipe *VPPhi = dyn_cast<VPWidenPHIRecipe>(&P); 4157 if (!VPPhi) 4158 continue; 4159 PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0)); 4160 // Make sure the builder has a valid insert point. 4161 Builder.SetInsertPoint(NewPhi); 4162 for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) { 4163 VPValue *Inc = VPPhi->getIncomingValue(i); 4164 VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i); 4165 NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]); 4166 } 4167 } 4168 } 4169 } 4170 4171 bool InnerLoopVectorizer::useOrderedReductions( 4172 const RecurrenceDescriptor &RdxDesc) { 4173 return Cost->useOrderedReductions(RdxDesc); 4174 } 4175 4176 void InnerLoopVectorizer::widenCallInstruction(CallInst &CI, VPValue *Def, 4177 VPUser &ArgOperands, 4178 VPTransformState &State) { 4179 assert(!isa<DbgInfoIntrinsic>(CI) && 4180 "DbgInfoIntrinsic should have been dropped during VPlan construction"); 4181 State.setDebugLocFromInst(&CI); 4182 4183 SmallVector<Type *, 4> Tys; 4184 for (Value *ArgOperand : CI.args()) 4185 Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue())); 4186 4187 Intrinsic::ID ID = getVectorIntrinsicIDForCall(&CI, TLI); 4188 4189 // The flag shows whether we use Intrinsic or a usual Call for vectorized 4190 // version of the instruction. 4191 // Is it beneficial to perform intrinsic call compared to lib call? 4192 bool NeedToScalarize = false; 4193 InstructionCost CallCost = Cost->getVectorCallCost(&CI, VF, NeedToScalarize); 4194 InstructionCost IntrinsicCost = 4195 ID ? 
Cost->getVectorIntrinsicCost(&CI, VF) : 0; 4196 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 4197 assert((UseVectorIntrinsic || !NeedToScalarize) && 4198 "Instruction should be scalarized elsewhere."); 4199 assert((IntrinsicCost.isValid() || CallCost.isValid()) && 4200 "Either the intrinsic cost or vector call cost must be valid"); 4201 4202 for (unsigned Part = 0; Part < UF; ++Part) { 4203 SmallVector<Type *, 2> TysForDecl = {CI.getType()}; 4204 SmallVector<Value *, 4> Args; 4205 for (auto &I : enumerate(ArgOperands.operands())) { 4206 // Some intrinsics have a scalar argument - don't replace it with a 4207 // vector. 4208 Value *Arg; 4209 if (!UseVectorIntrinsic || 4210 !isVectorIntrinsicWithScalarOpAtArg(ID, I.index())) 4211 Arg = State.get(I.value(), Part); 4212 else 4213 Arg = State.get(I.value(), VPIteration(0, 0)); 4214 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I.index())) 4215 TysForDecl.push_back(Arg->getType()); 4216 Args.push_back(Arg); 4217 } 4218 4219 Function *VectorF; 4220 if (UseVectorIntrinsic) { 4221 // Use vector version of the intrinsic. 4222 if (VF.isVector()) 4223 TysForDecl[0] = VectorType::get(CI.getType()->getScalarType(), VF); 4224 Module *M = State.Builder.GetInsertBlock()->getModule(); 4225 VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl); 4226 assert(VectorF && "Can't retrieve vector intrinsic."); 4227 } else { 4228 // Use vector version of the function call. 4229 const VFShape Shape = VFShape::get(CI, VF, false /*HasGlobalPred*/); 4230 #ifndef NDEBUG 4231 assert(VFDatabase(CI).getVectorizedFunction(Shape) != nullptr && 4232 "Can't create vector function."); 4233 #endif 4234 VectorF = VFDatabase(CI).getVectorizedFunction(Shape); 4235 } 4236 SmallVector<OperandBundleDef, 1> OpBundles; 4237 CI.getOperandBundlesAsDefs(OpBundles); 4238 CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles); 4239 4240 if (isa<FPMathOperator>(V)) 4241 V->copyFastMathFlags(&CI); 4242 4243 State.set(Def, V, Part); 4244 State.addMetadata(V, &CI); 4245 } 4246 } 4247 4248 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) { 4249 // We should not collect Scalars more than once per VF. Right now, this 4250 // function is called from collectUniformsAndScalars(), which already does 4251 // this check. Collecting Scalars for VF=1 does not make any sense. 4252 assert(VF.isVector() && Scalars.find(VF) == Scalars.end() && 4253 "This function should not be visited twice for the same VF"); 4254 4255 // This avoids any chances of creating a REPLICATE recipe during planning 4256 // since that would result in generation of scalarized code during execution, 4257 // which is not supported for scalable vectors. 4258 if (VF.isScalable()) { 4259 Scalars[VF].insert(Uniforms[VF].begin(), Uniforms[VF].end()); 4260 return; 4261 } 4262 4263 SmallSetVector<Instruction *, 8> Worklist; 4264 4265 // These sets are used to seed the analysis with pointers used by memory 4266 // accesses that will remain scalar. 4267 SmallSetVector<Instruction *, 8> ScalarPtrs; 4268 SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs; 4269 auto *Latch = TheLoop->getLoopLatch(); 4270 4271 // A helper that returns true if the use of Ptr by MemAccess will be scalar. 4272 // The pointer operands of loads and stores will be scalar as long as the 4273 // memory access is not a gather or scatter operation. The value operand of a 4274 // store will remain scalar if the store is scalarized. 
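// (For instance, a consecutive load that will be widened still computes a
// single scalar address per unrolled part, whereas a gather needs a vector
// of pointers, so its address is not a scalar use. Example only; the real
// answer comes from the recorded widening decision queried below.)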
4275 auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4276 InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4277 assert(WideningDecision != CM_Unknown &&
4278 "Widening decision should be ready at this moment");
4279 if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4280 if (Ptr == Store->getValueOperand())
4281 return WideningDecision == CM_Scalarize;
4282 assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4283 "Ptr is neither a value nor a pointer operand");
4284 return WideningDecision != CM_GatherScatter;
4285 };
4286
4287 // A helper that returns true if the given value is a bitcast or
4288 // getelementptr instruction contained in the loop.
4289 auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4290 return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4291 isa<GetElementPtrInst>(V)) &&
4292 !TheLoop->isLoopInvariant(V);
4293 };
4294
4295 // A helper that evaluates a memory access's use of a pointer. If the use will
4296 // be a scalar use and the pointer is only used by memory accesses, we place
4297 // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4298 // PossibleNonScalarPtrs.
4299 auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4300 // We only care about bitcast and getelementptr instructions contained in
4301 // the loop.
4302 if (!isLoopVaryingBitCastOrGEP(Ptr))
4303 return;
4304
4305 // If the pointer has already been identified as scalar (e.g., if it was
4306 // also identified as uniform), there's nothing to do.
4307 auto *I = cast<Instruction>(Ptr);
4308 if (Worklist.count(I))
4309 return;
4310
4311 // If the use of the pointer will be a scalar use, and all users of the
4312 // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4313 // place the pointer in PossibleNonScalarPtrs.
4314 if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4315 return isa<LoadInst>(U) || isa<StoreInst>(U);
4316 }))
4317 ScalarPtrs.insert(I);
4318 else
4319 PossibleNonScalarPtrs.insert(I);
4320 };
4321
4322 // We seed the scalars analysis with two classes of instructions: (1)
4323 // instructions marked uniform-after-vectorization and (2) bitcast,
4324 // getelementptr and (pointer) phi instructions used by memory accesses
4325 // requiring a scalar use.
4326 //
4327 // (1) Add to the worklist all instructions that have been identified as
4328 // uniform-after-vectorization.
4329 Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4330
4331 // (2) Add to the worklist all bitcast and getelementptr instructions used by
4332 // memory accesses requiring a scalar use. The pointer operands of loads and
4333 // stores will be scalar as long as the memory access is not a gather or
4334 // scatter operation. The value operand of a store will remain scalar if the
4335 // store is scalarized.
4336 for (auto *BB : TheLoop->blocks())
4337 for (auto &I : *BB) {
4338 if (auto *Load = dyn_cast<LoadInst>(&I)) {
4339 evaluatePtrUse(Load, Load->getPointerOperand());
4340 } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4341 evaluatePtrUse(Store, Store->getPointerOperand());
4342 evaluatePtrUse(Store, Store->getValueOperand());
4343 }
4344 }
4345 for (auto *I : ScalarPtrs)
4346 if (!PossibleNonScalarPtrs.count(I)) {
4347 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4348 Worklist.insert(I);
4349 }
4350
4351 // Insert the forced scalars.
4352 // FIXME: Currently VPWidenPHIRecipe() often creates a dead vector 4353 // induction variable when the PHI user is scalarized. 4354 auto ForcedScalar = ForcedScalars.find(VF); 4355 if (ForcedScalar != ForcedScalars.end()) 4356 for (auto *I : ForcedScalar->second) 4357 Worklist.insert(I); 4358 4359 // Expand the worklist by looking through any bitcasts and getelementptr 4360 // instructions we've already identified as scalar. This is similar to the 4361 // expansion step in collectLoopUniforms(); however, here we're only 4362 // expanding to include additional bitcasts and getelementptr instructions. 4363 unsigned Idx = 0; 4364 while (Idx != Worklist.size()) { 4365 Instruction *Dst = Worklist[Idx++]; 4366 if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0))) 4367 continue; 4368 auto *Src = cast<Instruction>(Dst->getOperand(0)); 4369 if (llvm::all_of(Src->users(), [&](User *U) -> bool { 4370 auto *J = cast<Instruction>(U); 4371 return !TheLoop->contains(J) || Worklist.count(J) || 4372 ((isa<LoadInst>(J) || isa<StoreInst>(J)) && 4373 isScalarUse(J, Src)); 4374 })) { 4375 Worklist.insert(Src); 4376 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); 4377 } 4378 } 4379 4380 // An induction variable will remain scalar if all users of the induction 4381 // variable and induction variable update remain scalar. 4382 for (auto &Induction : Legal->getInductionVars()) { 4383 auto *Ind = Induction.first; 4384 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4385 4386 // If tail-folding is applied, the primary induction variable will be used 4387 // to feed a vector compare. 4388 if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) 4389 continue; 4390 4391 // Returns true if \p Indvar is a pointer induction that is used directly by 4392 // load/store instruction \p I. 4393 auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, 4394 Instruction *I) { 4395 return Induction.second.getKind() == 4396 InductionDescriptor::IK_PtrInduction && 4397 (isa<LoadInst>(I) || isa<StoreInst>(I)) && 4398 Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); 4399 }; 4400 4401 // Determine if all users of the induction variable are scalar after 4402 // vectorization. 4403 auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4404 auto *I = cast<Instruction>(U); 4405 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4406 IsDirectLoadStoreFromPtrIndvar(Ind, I); 4407 }); 4408 if (!ScalarInd) 4409 continue; 4410 4411 // Determine if all users of the induction variable update instruction are 4412 // scalar after vectorization. 4413 auto ScalarIndUpdate = 4414 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4415 auto *I = cast<Instruction>(U); 4416 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4417 IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); 4418 }); 4419 if (!ScalarIndUpdate) 4420 continue; 4421 4422 // The induction variable and its update instruction will remain scalar. 
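// (For example, an IV used only to form addresses that are themselves
// scalarized, plus its own increment, ends up here; if any user were widened
// instead, a vector IV would still be required and the pair would be left
// out of the scalar set.)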
4423 Worklist.insert(Ind); 4424 Worklist.insert(IndUpdate); 4425 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n"); 4426 LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate 4427 << "\n"); 4428 } 4429 4430 Scalars[VF].insert(Worklist.begin(), Worklist.end()); 4431 } 4432 4433 bool LoopVectorizationCostModel::isScalarWithPredication( 4434 Instruction *I, ElementCount VF) const { 4435 if (!blockNeedsPredicationForAnyReason(I->getParent())) 4436 return false; 4437 switch(I->getOpcode()) { 4438 default: 4439 break; 4440 case Instruction::Load: 4441 case Instruction::Store: { 4442 if (!Legal->isMaskRequired(I)) 4443 return false; 4444 auto *Ptr = getLoadStorePointerOperand(I); 4445 auto *Ty = getLoadStoreType(I); 4446 Type *VTy = Ty; 4447 if (VF.isVector()) 4448 VTy = VectorType::get(Ty, VF); 4449 const Align Alignment = getLoadStoreAlignment(I); 4450 return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) || 4451 TTI.isLegalMaskedGather(VTy, Alignment)) 4452 : !(isLegalMaskedStore(Ty, Ptr, Alignment) || 4453 TTI.isLegalMaskedScatter(VTy, Alignment)); 4454 } 4455 case Instruction::UDiv: 4456 case Instruction::SDiv: 4457 case Instruction::SRem: 4458 case Instruction::URem: 4459 // TODO: We can use the loop-preheader as context point here and get 4460 // context sensitive reasoning 4461 return !isSafeToSpeculativelyExecute(I); 4462 } 4463 return false; 4464 } 4465 4466 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened( 4467 Instruction *I, ElementCount VF) { 4468 assert(isAccessInterleaved(I) && "Expecting interleaved access."); 4469 assert(getWideningDecision(I, VF) == CM_Unknown && 4470 "Decision should not be set yet."); 4471 auto *Group = getInterleavedAccessGroup(I); 4472 assert(Group && "Must have a group."); 4473 4474 // If the instruction's allocated size doesn't equal it's type size, it 4475 // requires padding and will be scalarized. 4476 auto &DL = I->getModule()->getDataLayout(); 4477 auto *ScalarTy = getLoadStoreType(I); 4478 if (hasIrregularType(ScalarTy, DL)) 4479 return false; 4480 4481 // If the group involves a non-integral pointer, we may not be able to 4482 // losslessly cast all values to a common type. 4483 unsigned InterleaveFactor = Group->getFactor(); 4484 bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy); 4485 for (unsigned i = 0; i < InterleaveFactor; i++) { 4486 Instruction *Member = Group->getMember(i); 4487 if (!Member) 4488 continue; 4489 auto *MemberTy = getLoadStoreType(Member); 4490 bool MemberNI = DL.isNonIntegralPointerType(MemberTy); 4491 // Don't coerce non-integral pointers to integers or vice versa. 4492 if (MemberNI != ScalarNI) { 4493 // TODO: Consider adding special nullptr value case here 4494 return false; 4495 } else if (MemberNI && ScalarNI && 4496 ScalarTy->getPointerAddressSpace() != 4497 MemberTy->getPointerAddressSpace()) { 4498 return false; 4499 } 4500 } 4501 4502 // Check if masking is required. 4503 // A Group may need masking for one of two reasons: it resides in a block that 4504 // needs predication, or it was decided to use masking to deal with gaps 4505 // (either a gap at the end of a load-access that may result in a speculative 4506 // load, or any gaps in a store-access). 
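// (Illustration: a factor-2 store group with both members present has no
// gaps, while a factor-3 store group with only one member filled does; a
// single wide store for the latter would clobber locations the original loop
// never writes, which is what StoreAccessWithGapsRequiresMasking below
// guards against.)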
4507 bool PredicatedAccessRequiresMasking =
4508 blockNeedsPredicationForAnyReason(I->getParent()) &&
4509 Legal->isMaskRequired(I);
4510 bool LoadAccessWithGapsRequiresEpilogMasking =
4511 isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
4512 !isScalarEpilogueAllowed();
4513 bool StoreAccessWithGapsRequiresMasking =
4514 isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
4515 if (!PredicatedAccessRequiresMasking &&
4516 !LoadAccessWithGapsRequiresEpilogMasking &&
4517 !StoreAccessWithGapsRequiresMasking)
4518 return true;
4519
4520 // If masked interleaving is required, we expect that the user/target had
4521 // enabled it, because otherwise it either wouldn't have been created or
4522 // it should have been invalidated by the CostModel.
4523 assert(useMaskedInterleavedAccesses(TTI) &&
4524 "Masked interleave-groups for predicated accesses are not enabled.");
4525
4526 if (Group->isReverse())
4527 return false;
4528
4529 auto *Ty = getLoadStoreType(I);
4530 const Align Alignment = getLoadStoreAlignment(I);
4531 return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
4532 : TTI.isLegalMaskedStore(Ty, Alignment);
4533 }
4534
4535 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
4536 Instruction *I, ElementCount VF) {
4537 // Get and ensure we have a valid memory instruction.
4538 assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
4539
4540 auto *Ptr = getLoadStorePointerOperand(I);
4541 auto *ScalarTy = getLoadStoreType(I);
4542
4543 // In order to be widened, the pointer should be consecutive, first of all.
4544 if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
4545 return false;
4546
4547 // If the instruction is a store located in a predicated block, it will be
4548 // scalarized.
4549 if (isScalarWithPredication(I, VF))
4550 return false;
4551
4552 // If the instruction's allocated size doesn't equal its type size, it
4553 // requires padding and will be scalarized.
4554 auto &DL = I->getModule()->getDataLayout();
4555 if (hasIrregularType(ScalarTy, DL))
4556 return false;
4557
4558 return true;
4559 }
4560
4561 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
4562 // We should not collect Uniforms more than once per VF. Right now,
4563 // this function is called from collectUniformsAndScalars(), which
4564 // already does this check. Collecting Uniforms for VF=1 does not make any
4565 // sense.
4566
4567 assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
4568 "This function should not be visited twice for the same VF");
4569
4570 // Visit the list of Uniforms. If we do not find any uniform value, we will
4571 // not analyze it again. Uniforms.count(VF) will return 1.
4572 Uniforms[VF].clear();
4573
4574 // We now know that the loop is vectorizable!
4575 // Collect instructions inside the loop that will remain uniform after
4576 // vectorization.
4577
4578 // Global values, params and instructions outside of the current loop are out of
4579 // scope.
4580 auto isOutOfScope = [&](Value *V) -> bool {
4581 Instruction *I = dyn_cast<Instruction>(V);
4582 return (!I || !TheLoop->contains(I));
4583 };
4584
4585 // Worklist containing uniform instructions demanding lane 0.
4586 SetVector<Instruction *> Worklist;
4587 BasicBlock *Latch = TheLoop->getLoopLatch();
4588
4589 // Add uniform instructions demanding lane 0 to the worklist.
Instructions 4590 // that are scalar with predication must not be considered uniform after 4591 // vectorization, because that would create an erroneous replicating region 4592 // where only a single instance out of VF should be formed. 4593 // TODO: optimize such seldom cases if found important, see PR40816. 4594 auto addToWorklistIfAllowed = [&](Instruction *I) -> void { 4595 if (isOutOfScope(I)) { 4596 LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: " 4597 << *I << "\n"); 4598 return; 4599 } 4600 if (isScalarWithPredication(I, VF)) { 4601 LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: " 4602 << *I << "\n"); 4603 return; 4604 } 4605 LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n"); 4606 Worklist.insert(I); 4607 }; 4608 4609 // Start with the conditional branch. If the branch condition is an 4610 // instruction contained in the loop that is only used by the branch, it is 4611 // uniform. 4612 auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0)); 4613 if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse()) 4614 addToWorklistIfAllowed(Cmp); 4615 4616 auto isUniformDecision = [&](Instruction *I, ElementCount VF) { 4617 InstWidening WideningDecision = getWideningDecision(I, VF); 4618 assert(WideningDecision != CM_Unknown && 4619 "Widening decision should be ready at this moment"); 4620 4621 // A uniform memory op is itself uniform. We exclude uniform stores 4622 // here as they demand the last lane, not the first one. 4623 if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) { 4624 assert(WideningDecision == CM_Scalarize); 4625 return true; 4626 } 4627 4628 return (WideningDecision == CM_Widen || 4629 WideningDecision == CM_Widen_Reverse || 4630 WideningDecision == CM_Interleave); 4631 }; 4632 4633 4634 // Returns true if Ptr is the pointer operand of a memory access instruction 4635 // I, and I is known to not require scalarization. 4636 auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool { 4637 return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF); 4638 }; 4639 4640 // Holds a list of values which are known to have at least one uniform use. 4641 // Note that there may be other uses which aren't uniform. A "uniform use" 4642 // here is something which only demands lane 0 of the unrolled iterations; 4643 // it does not imply that all lanes produce the same value (e.g. this is not 4644 // the usual meaning of uniform) 4645 SetVector<Value *> HasUniformUse; 4646 4647 // Scan the loop for instructions which are either a) known to have only 4648 // lane 0 demanded or b) are uses which demand only lane 0 of their operand. 4649 for (auto *BB : TheLoop->blocks()) 4650 for (auto &I : *BB) { 4651 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) { 4652 switch (II->getIntrinsicID()) { 4653 case Intrinsic::sideeffect: 4654 case Intrinsic::experimental_noalias_scope_decl: 4655 case Intrinsic::assume: 4656 case Intrinsic::lifetime_start: 4657 case Intrinsic::lifetime_end: 4658 if (TheLoop->hasLoopInvariantOperands(&I)) 4659 addToWorklistIfAllowed(&I); 4660 break; 4661 default: 4662 break; 4663 } 4664 } 4665 4666 // ExtractValue instructions must be uniform, because the operands are 4667 // known to be loop-invariant. 
4668 if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) { 4669 assert(isOutOfScope(EVI->getAggregateOperand()) && 4670 "Expected aggregate value to be loop invariant"); 4671 addToWorklistIfAllowed(EVI); 4672 continue; 4673 } 4674 4675 // If there's no pointer operand, there's nothing to do. 4676 auto *Ptr = getLoadStorePointerOperand(&I); 4677 if (!Ptr) 4678 continue; 4679 4680 // A uniform memory op is itself uniform. We exclude uniform stores 4681 // here as they demand the last lane, not the first one. 4682 if (isa<LoadInst>(I) && Legal->isUniformMemOp(I)) 4683 addToWorklistIfAllowed(&I); 4684 4685 if (isUniformDecision(&I, VF)) { 4686 assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check"); 4687 HasUniformUse.insert(Ptr); 4688 } 4689 } 4690 4691 // Add to the worklist any operands which have *only* uniform (e.g. lane 0 4692 // demanding) users. Since loops are assumed to be in LCSSA form, this 4693 // disallows uses outside the loop as well. 4694 for (auto *V : HasUniformUse) { 4695 if (isOutOfScope(V)) 4696 continue; 4697 auto *I = cast<Instruction>(V); 4698 auto UsersAreMemAccesses = 4699 llvm::all_of(I->users(), [&](User *U) -> bool { 4700 return isVectorizedMemAccessUse(cast<Instruction>(U), V); 4701 }); 4702 if (UsersAreMemAccesses) 4703 addToWorklistIfAllowed(I); 4704 } 4705 4706 // Expand Worklist in topological order: whenever a new instruction 4707 // is added , its users should be already inside Worklist. It ensures 4708 // a uniform instruction will only be used by uniform instructions. 4709 unsigned idx = 0; 4710 while (idx != Worklist.size()) { 4711 Instruction *I = Worklist[idx++]; 4712 4713 for (auto OV : I->operand_values()) { 4714 // isOutOfScope operands cannot be uniform instructions. 4715 if (isOutOfScope(OV)) 4716 continue; 4717 // First order recurrence Phi's should typically be considered 4718 // non-uniform. 4719 auto *OP = dyn_cast<PHINode>(OV); 4720 if (OP && Legal->isFirstOrderRecurrence(OP)) 4721 continue; 4722 // If all the users of the operand are uniform, then add the 4723 // operand into the uniform worklist. 4724 auto *OI = cast<Instruction>(OV); 4725 if (llvm::all_of(OI->users(), [&](User *U) -> bool { 4726 auto *J = cast<Instruction>(U); 4727 return Worklist.count(J) || isVectorizedMemAccessUse(J, OI); 4728 })) 4729 addToWorklistIfAllowed(OI); 4730 } 4731 } 4732 4733 // For an instruction to be added into Worklist above, all its users inside 4734 // the loop should also be in Worklist. However, this condition cannot be 4735 // true for phi nodes that form a cyclic dependence. We must process phi 4736 // nodes separately. An induction variable will remain uniform if all users 4737 // of the induction variable and induction variable update remain uniform. 4738 // The code below handles both pointer and non-pointer induction variables. 4739 for (auto &Induction : Legal->getInductionVars()) { 4740 auto *Ind = Induction.first; 4741 auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch)); 4742 4743 // Determine if all users of the induction variable are uniform after 4744 // vectorization. 4745 auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { 4746 auto *I = cast<Instruction>(U); 4747 return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || 4748 isVectorizedMemAccessUse(I, Ind); 4749 }); 4750 if (!UniformInd) 4751 continue; 4752 4753 // Determine if all users of the induction variable update instruction are 4754 // uniform after vectorization. 
4755 auto UniformIndUpdate = 4756 llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { 4757 auto *I = cast<Instruction>(U); 4758 return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || 4759 isVectorizedMemAccessUse(I, IndUpdate); 4760 }); 4761 if (!UniformIndUpdate) 4762 continue; 4763 4764 // The induction variable and its update instruction will remain uniform. 4765 addToWorklistIfAllowed(Ind); 4766 addToWorklistIfAllowed(IndUpdate); 4767 } 4768 4769 Uniforms[VF].insert(Worklist.begin(), Worklist.end()); 4770 } 4771 4772 bool LoopVectorizationCostModel::runtimeChecksRequired() { 4773 LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n"); 4774 4775 if (Legal->getRuntimePointerChecking()->Need) { 4776 reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz", 4777 "runtime pointer checks needed. Enable vectorization of this " 4778 "loop with '#pragma clang loop vectorize(enable)' when " 4779 "compiling with -Os/-Oz", 4780 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4781 return true; 4782 } 4783 4784 if (!PSE.getPredicate().isAlwaysTrue()) { 4785 reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz", 4786 "runtime SCEV checks needed. Enable vectorization of this " 4787 "loop with '#pragma clang loop vectorize(enable)' when " 4788 "compiling with -Os/-Oz", 4789 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4790 return true; 4791 } 4792 4793 // FIXME: Avoid specializing for stride==1 instead of bailing out. 4794 if (!Legal->getLAI()->getSymbolicStrides().empty()) { 4795 reportVectorizationFailure("Runtime stride check for small trip count", 4796 "runtime stride == 1 checks needed. Enable vectorization of " 4797 "this loop without such check by compiling with -Os/-Oz", 4798 "CantVersionLoopWithOptForSize", ORE, TheLoop); 4799 return true; 4800 } 4801 4802 return false; 4803 } 4804 4805 ElementCount 4806 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) { 4807 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) 4808 return ElementCount::getScalable(0); 4809 4810 if (Hints->isScalableVectorizationDisabled()) { 4811 reportVectorizationInfo("Scalable vectorization is explicitly disabled", 4812 "ScalableVectorizationDisabled", ORE, TheLoop); 4813 return ElementCount::getScalable(0); 4814 } 4815 4816 LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n"); 4817 4818 auto MaxScalableVF = ElementCount::getScalable( 4819 std::numeric_limits<ElementCount::ScalarTy>::max()); 4820 4821 // Test that the loop-vectorizer can legalize all operations for this MaxVF. 4822 // FIXME: While for scalable vectors this is currently sufficient, this should 4823 // be replaced by a more detailed mechanism that filters out specific VFs, 4824 // instead of invalidating vectorization for a whole set of VFs based on the 4825 // MaxVF. 4826 4827 // Disable scalable vectorization if the loop contains unsupported reductions. 4828 if (!canVectorizeReductions(MaxScalableVF)) { 4829 reportVectorizationInfo( 4830 "Scalable vectorization not supported for the reduction " 4831 "operations found in this loop.", 4832 "ScalableVFUnfeasible", ORE, TheLoop); 4833 return ElementCount::getScalable(0); 4834 } 4835 4836 // Disable scalable vectorization if the loop contains any instructions 4837 // with element types not supported for scalable vectors. 
4838 if (any_of(ElementTypesInLoop, [&](Type *Ty) { 4839 return !Ty->isVoidTy() && 4840 !this->TTI.isElementTypeLegalForScalableVector(Ty); 4841 })) { 4842 reportVectorizationInfo("Scalable vectorization is not supported " 4843 "for all element types found in this loop.", 4844 "ScalableVFUnfeasible", ORE, TheLoop); 4845 return ElementCount::getScalable(0); 4846 } 4847 4848 if (Legal->isSafeForAnyVectorWidth()) 4849 return MaxScalableVF; 4850 4851 // Limit MaxScalableVF by the maximum safe dependence distance. 4852 Optional<unsigned> MaxVScale = TTI.getMaxVScale(); 4853 if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) 4854 MaxVScale = 4855 TheFunction->getFnAttribute(Attribute::VScaleRange).getVScaleRangeMax(); 4856 MaxScalableVF = ElementCount::getScalable( 4857 MaxVScale ? (MaxSafeElements / MaxVScale.value()) : 0); 4858 if (!MaxScalableVF) 4859 reportVectorizationInfo( 4860 "Max legal vector width too small, scalable vectorization " 4861 "unfeasible.", 4862 "ScalableVFUnfeasible", ORE, TheLoop); 4863 4864 return MaxScalableVF; 4865 } 4866 4867 FixedScalableVFPair LoopVectorizationCostModel::computeFeasibleMaxVF( 4868 unsigned ConstTripCount, ElementCount UserVF, bool FoldTailByMasking) { 4869 MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI); 4870 unsigned SmallestType, WidestType; 4871 std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes(); 4872 4873 // Get the maximum safe dependence distance in bits computed by LAA. 4874 // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from 4875 // the memory accesses that is most restrictive (involved in the smallest 4876 // dependence distance). 4877 unsigned MaxSafeElements = 4878 PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType); 4879 4880 auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements); 4881 auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements); 4882 4883 LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF 4884 << ".\n"); 4885 LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF 4886 << ".\n"); 4887 4888 // First analyze the UserVF, fall back if the UserVF should be ignored. 4889 if (UserVF) { 4890 auto MaxSafeUserVF = 4891 UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF; 4892 4893 if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) { 4894 // If `VF=vscale x N` is safe, then so is `VF=N` 4895 if (UserVF.isScalable()) 4896 return FixedScalableVFPair( 4897 ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF); 4898 else 4899 return UserVF; 4900 } 4901 4902 assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF)); 4903 4904 // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it 4905 // is better to ignore the hint and let the compiler choose a suitable VF. 
4906 if (!UserVF.isScalable()) { 4907 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4908 << " is unsafe, clamping to max safe VF=" 4909 << MaxSafeFixedVF << ".\n"); 4910 ORE->emit([&]() { 4911 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4912 TheLoop->getStartLoc(), 4913 TheLoop->getHeader()) 4914 << "User-specified vectorization factor " 4915 << ore::NV("UserVectorizationFactor", UserVF) 4916 << " is unsafe, clamping to maximum safe vectorization factor " 4917 << ore::NV("VectorizationFactor", MaxSafeFixedVF); 4918 }); 4919 return MaxSafeFixedVF; 4920 } 4921 4922 if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) { 4923 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4924 << " is ignored because scalable vectors are not " 4925 "available.\n"); 4926 ORE->emit([&]() { 4927 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4928 TheLoop->getStartLoc(), 4929 TheLoop->getHeader()) 4930 << "User-specified vectorization factor " 4931 << ore::NV("UserVectorizationFactor", UserVF) 4932 << " is ignored because the target does not support scalable " 4933 "vectors. The compiler will pick a more suitable value."; 4934 }); 4935 } else { 4936 LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF 4937 << " is unsafe. Ignoring scalable UserVF.\n"); 4938 ORE->emit([&]() { 4939 return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor", 4940 TheLoop->getStartLoc(), 4941 TheLoop->getHeader()) 4942 << "User-specified vectorization factor " 4943 << ore::NV("UserVectorizationFactor", UserVF) 4944 << " is unsafe. Ignoring the hint to let the compiler pick a " 4945 "more suitable value."; 4946 }); 4947 } 4948 } 4949 4950 LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType 4951 << " / " << WidestType << " bits.\n"); 4952 4953 FixedScalableVFPair Result(ElementCount::getFixed(1), 4954 ElementCount::getScalable(0)); 4955 if (auto MaxVF = 4956 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4957 MaxSafeFixedVF, FoldTailByMasking)) 4958 Result.FixedVF = MaxVF; 4959 4960 if (auto MaxVF = 4961 getMaximizedVFForTarget(ConstTripCount, SmallestType, WidestType, 4962 MaxSafeScalableVF, FoldTailByMasking)) 4963 if (MaxVF.isScalable()) { 4964 Result.ScalableVF = MaxVF; 4965 LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF 4966 << "\n"); 4967 } 4968 4969 return Result; 4970 } 4971 4972 FixedScalableVFPair 4973 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { 4974 if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) { 4975 // TODO: It may by useful to do since it's still likely to be dynamically 4976 // uniform if the target can skip. 4977 reportVectorizationFailure( 4978 "Not inserting runtime ptr check for divergent target", 4979 "runtime pointer checks needed. 
Not enabled for divergent target",
4980 "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
4981 return FixedScalableVFPair::getNone();
4982 }
4983
4984 unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
4985 LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
4986 if (TC == 1) {
4987 reportVectorizationFailure("Single iteration (non) loop",
4988 "loop trip count is one, irrelevant for vectorization",
4989 "SingleIterationLoop", ORE, TheLoop);
4990 return FixedScalableVFPair::getNone();
4991 }
4992
4993 switch (ScalarEpilogueStatus) {
4994 case CM_ScalarEpilogueAllowed:
4995 return computeFeasibleMaxVF(TC, UserVF, false);
4996 case CM_ScalarEpilogueNotAllowedUsePredicate:
4997 LLVM_FALLTHROUGH;
4998 case CM_ScalarEpilogueNotNeededUsePredicate:
4999 LLVM_DEBUG(
5000 dbgs() << "LV: vector predicate hint/switch found.\n"
5001 << "LV: Not allowing scalar epilogue, creating predicated "
5002 << "vector loop.\n");
5003 break;
5004 case CM_ScalarEpilogueNotAllowedLowTripLoop:
5005 // fallthrough as a special case of OptForSize
5006 case CM_ScalarEpilogueNotAllowedOptSize:
5007 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5008 LLVM_DEBUG(
5009 dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5010 else
5011 LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5012 << "count.\n");
5013
5014 // Bail if runtime checks are required, which are not good when optimising
5015 // for size.
5016 if (runtimeChecksRequired())
5017 return FixedScalableVFPair::getNone();
5018
5019 break;
5020 }
5021
5022 // The only loops we can vectorize without a scalar epilogue are loops with
5023 // a bottom-test and a single exiting block. We'd have to handle the fact
5024 // that not every instruction executes on the last iteration. This will
5025 // require a lane mask which varies through the vector loop body. (TODO)
5026 if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5027 // If there was a tail-folding hint/switch, but we can't fold the tail by
5028 // masking, fall back to a vectorization with a scalar epilogue.
5029 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5030 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5031 "scalar epilogue instead.\n");
5032 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5033 return computeFeasibleMaxVF(TC, UserVF, false);
5034 }
5035 return FixedScalableVFPair::getNone();
5036 }
5037
5038 // Now try the tail folding
5039
5040 // Invalidate interleave groups that require an epilogue if we can't mask
5041 // the interleave-group.
5042 if (!useMaskedInterleavedAccesses(TTI)) {
5043 assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5044 "No decisions should have been taken at this point");
5045 // Note: There is no need to invalidate any cost modeling decisions here, as
5046 // none were taken so far.
5047 InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5048 }
5049
5050 FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF, true);
5051 // Avoid tail folding if the trip count is known to be a multiple of any VF
5052 // we chose.
5053 // FIXME: The condition below pessimises the case for fixed-width vectors,
5054 // when scalable VFs are also candidates for vectorization.
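// (Worked example with illustrative numbers: for a known trip count of 128,
// MaxFixedVF = 16 and no user-specified interleave count, the exit count is
// 128 and 128 urem 16 == 0, so no tail remains and tail folding is skipped.)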
5055 if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5056 ElementCount MaxFixedVF = MaxFactors.FixedVF;
5057 assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5058 "MaxFixedVF must be a power of 2");
5059 unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5060 : MaxFixedVF.getFixedValue();
5061 ScalarEvolution *SE = PSE.getSE();
5062 const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5063 const SCEV *ExitCount = SE->getAddExpr(
5064 BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5065 const SCEV *Rem = SE->getURemExpr(
5066 SE->applyLoopGuards(ExitCount, TheLoop),
5067 SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5068 if (Rem->isZero()) {
5069 // Accept MaxFixedVF if we do not have a tail.
5070 LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5071 return MaxFactors;
5072 }
5073 }
5074
5075 // If we don't know the precise trip count, or if the trip count that we
5076 // found modulo the vectorization factor is not zero, try to fold the tail
5077 // by masking.
5078 // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5079 if (Legal->prepareToFoldTailByMasking()) {
5080 FoldTailByMasking = true;
5081 return MaxFactors;
5082 }
5083
5084 // If there was a tail-folding hint/switch, but we can't fold the tail by
5085 // masking, fall back to a vectorization with a scalar epilogue.
5086 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5087 LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5088 "scalar epilogue instead.\n");
5089 ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5090 return MaxFactors;
5091 }
5092
5093 if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5094 LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5095 return FixedScalableVFPair::getNone();
5096 }
5097
5098 if (TC == 0) {
5099 reportVectorizationFailure(
5100 "Unable to calculate the loop count due to complex control flow",
5101 "unable to calculate the loop count due to complex control flow",
5102 "UnknownLoopCountComplexCFG", ORE, TheLoop);
5103 return FixedScalableVFPair::getNone();
5104 }
5105
5106 reportVectorizationFailure(
5107 "Cannot optimize for size and vectorize at the same time.",
5108 "cannot optimize for size and vectorize at the same time. "
5109 "Enable vectorization of this loop with '#pragma clang loop "
5110 "vectorize(enable)' when compiling with -Os/-Oz",
5111 "NoTailLoopWithOptForSize", ORE, TheLoop);
5112 return FixedScalableVFPair::getNone();
5113 }
5114
5115 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5116 unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5117 ElementCount MaxSafeVF, bool FoldTailByMasking) {
5118 bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5119 TypeSize WidestRegister = TTI.getRegisterBitWidth(
5120 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5121 : TargetTransformInfo::RGK_FixedWidthVector);
5122
5123 // Convenience function to return the minimum of two ElementCounts.
5124 auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5125 assert((LHS.isScalable() == RHS.isScalable()) &&
5126 "Scalable flags must match");
5127 return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5128 };
5129
5130 // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5131 // Note that both WidestRegister and WidestType may not be powers of 2.
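// (E.g., with illustrative numbers: a 128-bit widest register and a widest
// loop type of 32 bits give PowerOf2Floor(128 / 32) = 4 lanes, i.e. a
// maximum element count of 4, or "vscale x 4" when computing the scalable
// maximum.)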
5132 auto MaxVectorElementCount = ElementCount::get(
5133 PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5134 ComputeScalableMaxVF);
5135 MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5136 LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5137 << (MaxVectorElementCount * WidestType) << " bits.\n");
5138
5139 if (!MaxVectorElementCount) {
5140 LLVM_DEBUG(dbgs() << "LV: The target has no "
5141 << (ComputeScalableMaxVF ? "scalable" : "fixed")
5142 << " vector registers.\n");
5143 return ElementCount::getFixed(1);
5144 }
5145
5146 const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5147 if (ConstTripCount &&
5148 ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5149 (!FoldTailByMasking || isPowerOf2_32(ConstTripCount))) {
5150 // If the loop trip count (TC) is known at compile time there is no point in
5151 // choosing VF greater than TC (as done in the loop below). Select the
5152 // maximum power of two which doesn't exceed TC.
5153 // If MaxVectorElementCount is scalable, we only fall back on a fixed VF
5154 // when the TC is less than or equal to the known number of lanes.
5155 auto ClampedConstTripCount = PowerOf2Floor(ConstTripCount);
5156 LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to maximum power of two not "
5157 "exceeding the constant trip count: "
5158 << ClampedConstTripCount << "\n");
5159 return ElementCount::getFixed(ClampedConstTripCount);
5160 }
5161
5162 TargetTransformInfo::RegisterKind RegKind =
5163 ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5164 : TargetTransformInfo::RGK_FixedWidthVector;
5165 ElementCount MaxVF = MaxVectorElementCount;
5166 if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
5167 TTI.shouldMaximizeVectorBandwidth(RegKind))) {
5168 auto MaxVectorElementCountMaxBW = ElementCount::get(
5169 PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5170 ComputeScalableMaxVF);
5171 MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5172
5173 // Collect all viable vectorization factors larger than the default MaxVF
5174 // (i.e. MaxVectorElementCount).
5175 SmallVector<ElementCount, 8> VFs;
5176 for (ElementCount VS = MaxVectorElementCount * 2;
5177 ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5178 VFs.push_back(VS);
5179
5180 // For each VF calculate its register usage.
5181 auto RUs = calculateRegisterUsage(VFs);
5182
5183 // Select the largest VF which doesn't require more registers than existing
5184 // ones.
5185 for (int i = RUs.size() - 1; i >= 0; --i) {
5186 bool Selected = true;
5187 for (auto &pair : RUs[i].MaxLocalUsers) {
5188 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5189 if (pair.second > TargetNumRegisters)
5190 Selected = false;
5191 }
5192 if (Selected) {
5193 MaxVF = VFs[i];
5194 break;
5195 }
5196 }
5197 if (ElementCount MinVF =
5198 TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5199 if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5200 LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5201 << ") with target's minimum: " << MinVF << '\n');
5202 MaxVF = MinVF;
5203 }
5204 }
5205
5206 // Invalidate any widening decisions we might have made, in case the loop
5207 // requires predication (decided later), but we have already made some
5208 // load/store widening decisions.
5209 invalidateCostModelingDecisions(); 5210 } 5211 return MaxVF; 5212 } 5213 5214 Optional<unsigned> LoopVectorizationCostModel::getVScaleForTuning() const { 5215 if (TheFunction->hasFnAttribute(Attribute::VScaleRange)) { 5216 auto Attr = TheFunction->getFnAttribute(Attribute::VScaleRange); 5217 auto Min = Attr.getVScaleRangeMin(); 5218 auto Max = Attr.getVScaleRangeMax(); 5219 if (Max && Min == Max) 5220 return Max; 5221 } 5222 5223 return TTI.getVScaleForTuning(); 5224 } 5225 5226 bool LoopVectorizationCostModel::isMoreProfitable( 5227 const VectorizationFactor &A, const VectorizationFactor &B) const { 5228 InstructionCost CostA = A.Cost; 5229 InstructionCost CostB = B.Cost; 5230 5231 unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop); 5232 5233 if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking && 5234 MaxTripCount) { 5235 // If we are folding the tail and the trip count is a known (possibly small) 5236 // constant, the trip count will be rounded up to an integer number of 5237 // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF), 5238 // which we compare directly. When not folding the tail, the total cost will 5239 // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is 5240 // approximated with the per-lane cost below instead of using the tripcount 5241 // as here. 5242 auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue()); 5243 auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue()); 5244 return RTCostA < RTCostB; 5245 } 5246 5247 // Improve estimate for the vector width if it is scalable. 5248 unsigned EstimatedWidthA = A.Width.getKnownMinValue(); 5249 unsigned EstimatedWidthB = B.Width.getKnownMinValue(); 5250 if (Optional<unsigned> VScale = getVScaleForTuning()) { 5251 if (A.Width.isScalable()) 5252 EstimatedWidthA *= VScale.value(); 5253 if (B.Width.isScalable()) 5254 EstimatedWidthB *= VScale.value(); 5255 } 5256 5257 // Assume vscale may be larger than 1 (or the value being tuned for), 5258 // so that scalable vectorization is slightly favorable over fixed-width 5259 // vectorization. 5260 if (A.Width.isScalable() && !B.Width.isScalable()) 5261 return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA); 5262 5263 // To avoid the need for FP division: 5264 // (CostA / A.Width) < (CostB / B.Width) 5265 // <=> (CostA * B.Width) < (CostB * A.Width) 5266 return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA); 5267 } 5268 5269 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor( 5270 const ElementCountSet &VFCandidates) { 5271 InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first; 5272 LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n"); 5273 assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop"); 5274 assert(VFCandidates.count(ElementCount::getFixed(1)) && 5275 "Expected Scalar VF to be a candidate"); 5276 5277 const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost, 5278 ExpectedCost); 5279 VectorizationFactor ChosenFactor = ScalarCost; 5280 5281 bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled; 5282 if (ForceVectorization && VFCandidates.size() > 1) { 5283 // Ignore scalar width, because the user explicitly wants vectorization. 5284 // Initialize cost to max so that VF = 2 is, at least, chosen during cost 5285 // evaluation. 
5286 ChosenFactor.Cost = InstructionCost::getMax();
5287 }
5288
5289 SmallVector<InstructionVFPair> InvalidCosts;
5290 for (const auto &i : VFCandidates) {
5291 // The cost for scalar VF=1 is already calculated, so ignore it.
5292 if (i.isScalar())
5293 continue;
5294
5295 VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5296 VectorizationFactor Candidate(i, C.first, ScalarCost.ScalarCost);
5297
5298 #ifndef NDEBUG
5299 unsigned AssumedMinimumVscale = 1;
5300 if (Optional<unsigned> VScale = getVScaleForTuning())
5301 AssumedMinimumVscale = *VScale;
5302 unsigned Width =
5303 Candidate.Width.isScalable()
5304 ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5305 : Candidate.Width.getFixedValue();
5306 LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5307 << " costs: " << (Candidate.Cost / Width));
5308 if (i.isScalable())
5309 LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5310 << AssumedMinimumVscale << ")");
5311 LLVM_DEBUG(dbgs() << ".\n");
5312 #endif
5313
5314 if (!C.second && !ForceVectorization) {
5315 LLVM_DEBUG(
5316 dbgs() << "LV: Not considering vector loop of width " << i
5317 << " because it will not generate any vector instructions.\n");
5318 continue;
5319 }
5320
5321 // If profitable, add it to the ProfitableVFs list.
5322 if (isMoreProfitable(Candidate, ScalarCost))
5323 ProfitableVFs.push_back(Candidate);
5324
5325 if (isMoreProfitable(Candidate, ChosenFactor))
5326 ChosenFactor = Candidate;
5327 }
5328
5329 // Emit a report of VFs with invalid costs in the loop.
5330 if (!InvalidCosts.empty()) {
5331 // Group the remarks per instruction, keeping the instruction order from
5332 // InvalidCosts.
5333 std::map<Instruction *, unsigned> Numbering;
5334 unsigned I = 0;
5335 for (auto &Pair : InvalidCosts)
5336 if (!Numbering.count(Pair.first))
5337 Numbering[Pair.first] = I++;
5338
5339 // Sort the list, first on instruction(number) then on VF.
5340 llvm::sort(InvalidCosts,
5341 [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5342 if (Numbering[A.first] != Numbering[B.first])
5343 return Numbering[A.first] < Numbering[B.first];
5344 ElementCountComparator ECC;
5345 return ECC(A.second, B.second);
5346 });
5347
5348 // For a list of ordered instruction-vf pairs:
5349 // [(load, vf1), (load, vf2), (store, vf1)]
5350 // Group the instructions together to emit separate remarks for:
5351 // load (vf1, vf2)
5352 // store (vf1)
5353 auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5354 auto Subset = ArrayRef<InstructionVFPair>();
5355 do {
5356 if (Subset.empty())
5357 Subset = Tail.take_front(1);
5358
5359 Instruction *I = Subset.front().first;
5360
5361 // If the next instruction is different, or if there are no other pairs,
5362 // emit a remark for the collated subset. e.g.
5363 // [(load, vf1), (load, vf2)]
5364 // to emit:
5365 // remark: invalid costs for 'load' at VF=(vf1, vf2)
5366 if (Subset == Tail || Tail[Subset.size()].first != I) {
5367 std::string OutString;
5368 raw_string_ostream OS(OutString);
5369 assert(!Subset.empty() && "Unexpected empty range");
5370 OS << "Instruction with invalid costs prevented vectorization at VF=(";
5371 for (auto &Pair : Subset)
5372 OS << (Pair.second == Subset.front().second ?
"" : ", ") 5373 << Pair.second; 5374 OS << "):"; 5375 if (auto *CI = dyn_cast<CallInst>(I)) 5376 OS << " call to " << CI->getCalledFunction()->getName(); 5377 else 5378 OS << " " << I->getOpcodeName(); 5379 OS.flush(); 5380 reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I); 5381 Tail = Tail.drop_front(Subset.size()); 5382 Subset = {}; 5383 } else 5384 // Grow the subset by one element 5385 Subset = Tail.take_front(Subset.size() + 1); 5386 } while (!Tail.empty()); 5387 } 5388 5389 if (!EnableCondStoresVectorization && NumPredStores) { 5390 reportVectorizationFailure("There are conditional stores.", 5391 "store that is conditionally executed prevents vectorization", 5392 "ConditionalStore", ORE, TheLoop); 5393 ChosenFactor = ScalarCost; 5394 } 5395 5396 LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() && 5397 !isMoreProfitable(ChosenFactor, ScalarCost)) dbgs() 5398 << "LV: Vectorization seems to be not beneficial, " 5399 << "but was forced by a user.\n"); 5400 LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n"); 5401 return ChosenFactor; 5402 } 5403 5404 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization( 5405 const Loop &L, ElementCount VF) const { 5406 // Cross iteration phis such as reductions need special handling and are 5407 // currently unsupported. 5408 if (any_of(L.getHeader()->phis(), 5409 [&](PHINode &Phi) { return Legal->isFirstOrderRecurrence(&Phi); })) 5410 return false; 5411 5412 // Phis with uses outside of the loop require special handling and are 5413 // currently unsupported. 5414 for (auto &Entry : Legal->getInductionVars()) { 5415 // Look for uses of the value of the induction at the last iteration. 5416 Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch()); 5417 for (User *U : PostInc->users()) 5418 if (!L.contains(cast<Instruction>(U))) 5419 return false; 5420 // Look for uses of penultimate value of the induction. 5421 for (User *U : Entry.first->users()) 5422 if (!L.contains(cast<Instruction>(U))) 5423 return false; 5424 } 5425 5426 // Induction variables that are widened require special handling that is 5427 // currently not supported. 5428 if (any_of(Legal->getInductionVars(), [&](auto &Entry) { 5429 return !(this->isScalarAfterVectorization(Entry.first, VF) || 5430 this->isProfitableToScalarize(Entry.first, VF)); 5431 })) 5432 return false; 5433 5434 // Epilogue vectorization code has not been auditted to ensure it handles 5435 // non-latch exits properly. It may be fine, but it needs auditted and 5436 // tested. 5437 if (L.getExitingBlock() != L.getLoopLatch()) 5438 return false; 5439 5440 return true; 5441 } 5442 5443 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable( 5444 const ElementCount VF) const { 5445 // FIXME: We need a much better cost-model to take different parameters such 5446 // as register pressure, code size increase and cost of extra branches into 5447 // account. For now we apply a very crude heuristic and only consider loops 5448 // with vectorization factors larger than a certain value. 5449 // We also consider epilogue vectorization unprofitable for targets that don't 5450 // consider interleaving beneficial (eg. MVE). 5451 if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1) 5452 return false; 5453 // FIXME: We should consider changing the threshold for scalable 5454 // vectors to take VScaleForTuning into account. 
5455 if (VF.getKnownMinValue() >= EpilogueVectorizationMinVF) 5456 return true; 5457 return false; 5458 } 5459 5460 VectorizationFactor 5461 LoopVectorizationCostModel::selectEpilogueVectorizationFactor( 5462 const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) { 5463 VectorizationFactor Result = VectorizationFactor::Disabled(); 5464 if (!EnableEpilogueVectorization) { 5465 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";); 5466 return Result; 5467 } 5468 5469 if (!isScalarEpilogueAllowed()) { 5470 LLVM_DEBUG( 5471 dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is " 5472 "allowed.\n";); 5473 return Result; 5474 } 5475 5476 // Not really a cost consideration, but check for unsupported cases here to 5477 // simplify the logic. 5478 if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) { 5479 LLVM_DEBUG( 5480 dbgs() << "LEV: Unable to vectorize epilogue because the loop is " 5481 "not a supported candidate.\n";); 5482 return Result; 5483 } 5484 5485 if (EpilogueVectorizationForceVF > 1) { 5486 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";); 5487 ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF); 5488 if (LVP.hasPlanWithVF(ForcedEC)) 5489 return {ForcedEC, 0, 0}; 5490 else { 5491 LLVM_DEBUG( 5492 dbgs() 5493 << "LEV: Epilogue vectorization forced factor is not viable.\n";); 5494 return Result; 5495 } 5496 } 5497 5498 if (TheLoop->getHeader()->getParent()->hasOptSize() || 5499 TheLoop->getHeader()->getParent()->hasMinSize()) { 5500 LLVM_DEBUG( 5501 dbgs() 5502 << "LEV: Epilogue vectorization skipped due to opt for size.\n";); 5503 return Result; 5504 } 5505 5506 if (!isEpilogueVectorizationProfitable(MainLoopVF)) { 5507 LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for " 5508 "this loop\n"); 5509 return Result; 5510 } 5511 5512 // If MainLoopVF = vscale x 2, and vscale is expected to be 4, then we know 5513 // the main loop handles 8 lanes per iteration. We could still benefit from 5514 // vectorizing the epilogue loop with VF=4. 5515 ElementCount EstimatedRuntimeVF = MainLoopVF; 5516 if (MainLoopVF.isScalable()) { 5517 EstimatedRuntimeVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue()); 5518 if (Optional<unsigned> VScale = getVScaleForTuning()) 5519 EstimatedRuntimeVF *= *VScale; 5520 } 5521 5522 for (auto &NextVF : ProfitableVFs) 5523 if (((!NextVF.Width.isScalable() && MainLoopVF.isScalable() && 5524 ElementCount::isKnownLT(NextVF.Width, EstimatedRuntimeVF)) || 5525 ElementCount::isKnownLT(NextVF.Width, MainLoopVF)) && 5526 (Result.Width.isScalar() || isMoreProfitable(NextVF, Result)) && 5527 LVP.hasPlanWithVF(NextVF.Width)) 5528 Result = NextVF; 5529 5530 if (Result != VectorizationFactor::Disabled()) 5531 LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = " 5532 << Result.Width << "\n";); 5533 return Result; 5534 } 5535 5536 std::pair<unsigned, unsigned> 5537 LoopVectorizationCostModel::getSmallestAndWidestTypes() { 5538 unsigned MinWidth = -1U; 5539 unsigned MaxWidth = 8; 5540 const DataLayout &DL = TheFunction->getParent()->getDataLayout(); 5541 // For in-loop reductions, no element types are added to ElementTypesInLoop 5542 // if there are no loads/stores in the loop. In this case, check through the 5543 // reduction variables to determine the maximum width. 5544 if (ElementTypesInLoop.empty() && !Legal->getReductionVars().empty()) { 5545 // Reset MaxWidth so that we can find the smallest type used by recurrences 5546 // in the loop. 
5547 MaxWidth = -1U; 5548 for (auto &PhiDescriptorPair : Legal->getReductionVars()) { 5549 const RecurrenceDescriptor &RdxDesc = PhiDescriptorPair.second; 5550 // When finding the min width used by the recurrence we need to account 5551 // for casts on the input operands of the recurrence. 5552 MaxWidth = std::min<unsigned>( 5553 MaxWidth, std::min<unsigned>( 5554 RdxDesc.getMinWidthCastToRecurrenceTypeInBits(), 5555 RdxDesc.getRecurrenceType()->getScalarSizeInBits())); 5556 } 5557 } else { 5558 for (Type *T : ElementTypesInLoop) { 5559 MinWidth = std::min<unsigned>( 5560 MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5561 MaxWidth = std::max<unsigned>( 5562 MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize()); 5563 } 5564 } 5565 return {MinWidth, MaxWidth}; 5566 } 5567 5568 void LoopVectorizationCostModel::collectElementTypesForWidening() { 5569 ElementTypesInLoop.clear(); 5570 // For each block. 5571 for (BasicBlock *BB : TheLoop->blocks()) { 5572 // For each instruction in the loop. 5573 for (Instruction &I : BB->instructionsWithoutDebug()) { 5574 Type *T = I.getType(); 5575 5576 // Skip ignored values. 5577 if (ValuesToIgnore.count(&I)) 5578 continue; 5579 5580 // Only examine Loads, Stores and PHINodes. 5581 if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I)) 5582 continue; 5583 5584 // Examine PHI nodes that are reduction variables. Update the type to 5585 // account for the recurrence type. 5586 if (auto *PN = dyn_cast<PHINode>(&I)) { 5587 if (!Legal->isReductionVariable(PN)) 5588 continue; 5589 const RecurrenceDescriptor &RdxDesc = 5590 Legal->getReductionVars().find(PN)->second; 5591 if (PreferInLoopReductions || useOrderedReductions(RdxDesc) || 5592 TTI.preferInLoopReduction(RdxDesc.getOpcode(), 5593 RdxDesc.getRecurrenceType(), 5594 TargetTransformInfo::ReductionFlags())) 5595 continue; 5596 T = RdxDesc.getRecurrenceType(); 5597 } 5598 5599 // Examine the stored values. 5600 if (auto *ST = dyn_cast<StoreInst>(&I)) 5601 T = ST->getValueOperand()->getType(); 5602 5603 assert(T->isSized() && 5604 "Expected the load/store/recurrence type to be sized"); 5605 5606 ElementTypesInLoop.insert(T); 5607 } 5608 } 5609 } 5610 5611 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, 5612 unsigned LoopCost) { 5613 // -- The interleave heuristics -- 5614 // We interleave the loop in order to expose ILP and reduce the loop overhead. 5615 // There are many micro-architectural considerations that we can't predict 5616 // at this level. For example, frontend pressure (on decode or fetch) due to 5617 // code size, or the number and capabilities of the execution ports. 5618 // 5619 // We use the following heuristics to select the interleave count: 5620 // 1. If the code has reductions, then we interleave to break the cross 5621 // iteration dependency. 5622 // 2. If the loop is really small, then we interleave to reduce the loop 5623 // overhead. 5624 // 3. We don't interleave if we think that we will spill registers to memory 5625 // due to the increased register pressure. 5626 5627 if (!isScalarEpilogueAllowed()) 5628 return 1; 5629 5630 // We used the distance for the interleave count. 5631 if (Legal->getMaxSafeDepDistBytes() != -1U) 5632 return 1; 5633 5634 auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); 5635 const bool HasReductions = !Legal->getReductionVars().empty(); 5636 // Do not interleave loops with a relatively small known or estimated trip 5637 // count. 
But we will interleave when InterleaveSmallLoopScalarReduction is 5638 // enabled, and the code has scalar reductions(HasReductions && VF = 1), 5639 // because with the above conditions interleaving can expose ILP and break 5640 // cross iteration dependences for reductions. 5641 if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && 5642 !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) 5643 return 1; 5644 5645 // If we did not calculate the cost for VF (because the user selected the VF) 5646 // then we calculate the cost of VF here. 5647 if (LoopCost == 0) { 5648 InstructionCost C = expectedCost(VF).first; 5649 assert(C.isValid() && "Expected to have chosen a VF with valid cost"); 5650 LoopCost = *C.getValue(); 5651 5652 // Loop body is free and there is no need for interleaving. 5653 if (LoopCost == 0) 5654 return 1; 5655 } 5656 5657 RegisterUsage R = calculateRegisterUsage({VF})[0]; 5658 // We divide by these constants so assume that we have at least one 5659 // instruction that uses at least one register. 5660 for (auto& pair : R.MaxLocalUsers) { 5661 pair.second = std::max(pair.second, 1U); 5662 } 5663 5664 // We calculate the interleave count using the following formula. 5665 // Subtract the number of loop invariants from the number of available 5666 // registers. These registers are used by all of the interleaved instances. 5667 // Next, divide the remaining registers by the number of registers that is 5668 // required by the loop, in order to estimate how many parallel instances 5669 // fit without causing spills. All of this is rounded down if necessary to be 5670 // a power of two. We want power of two interleave count to simplify any 5671 // addressing operations or alignment considerations. 5672 // We also want power of two interleave counts to ensure that the induction 5673 // variable of the vector loop wraps to zero, when tail is folded by masking; 5674 // this currently happens when OptForSize, in which case IC is set to 1 above. 5675 unsigned IC = UINT_MAX; 5676 5677 for (auto& pair : R.MaxLocalUsers) { 5678 unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first); 5679 LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters 5680 << " registers of " 5681 << TTI.getRegisterClassName(pair.first) << " register class\n"); 5682 if (VF.isScalar()) { 5683 if (ForceTargetNumScalarRegs.getNumOccurrences() > 0) 5684 TargetNumRegisters = ForceTargetNumScalarRegs; 5685 } else { 5686 if (ForceTargetNumVectorRegs.getNumOccurrences() > 0) 5687 TargetNumRegisters = ForceTargetNumVectorRegs; 5688 } 5689 unsigned MaxLocalUsers = pair.second; 5690 unsigned LoopInvariantRegs = 0; 5691 if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end()) 5692 LoopInvariantRegs = R.LoopInvariantRegs[pair.first]; 5693 5694 unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers); 5695 // Don't count the induction variable as interleaved. 5696 if (EnableIndVarRegisterHeur) { 5697 TmpIC = 5698 PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) / 5699 std::max(1U, (MaxLocalUsers - 1))); 5700 } 5701 5702 IC = std::min(IC, TmpIC); 5703 } 5704 5705 // Clamp the interleave ranges to reasonable counts. 5706 unsigned MaxInterleaveCount = 5707 TTI.getMaxInterleaveFactor(VF.getKnownMinValue()); 5708 5709 // Check if the user has overridden the max. 
5710 if (VF.isScalar()) { 5711 if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0) 5712 MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor; 5713 } else { 5714 if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0) 5715 MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor; 5716 } 5717 5718 // If trip count is known or estimated compile time constant, limit the 5719 // interleave count to be less than the trip count divided by VF, provided it 5720 // is at least 1. 5721 // 5722 // For scalable vectors we can't know if interleaving is beneficial. It may 5723 // not be beneficial for small loops if none of the lanes in the second vector 5724 // iterations is enabled. However, for larger loops, there is likely to be a 5725 // similar benefit as for fixed-width vectors. For now, we choose to leave 5726 // the InterleaveCount as if vscale is '1', although if some information about 5727 // the vector is known (e.g. min vector size), we can make a better decision. 5728 if (BestKnownTC) { 5729 MaxInterleaveCount = 5730 std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount); 5731 // Make sure MaxInterleaveCount is greater than 0. 5732 MaxInterleaveCount = std::max(1u, MaxInterleaveCount); 5733 } 5734 5735 assert(MaxInterleaveCount > 0 && 5736 "Maximum interleave count must be greater than 0"); 5737 5738 // Clamp the calculated IC to be between the 1 and the max interleave count 5739 // that the target and trip count allows. 5740 if (IC > MaxInterleaveCount) 5741 IC = MaxInterleaveCount; 5742 else 5743 // Make sure IC is greater than 0. 5744 IC = std::max(1u, IC); 5745 5746 assert(IC > 0 && "Interleave count must be greater than 0."); 5747 5748 // Interleave if we vectorized this loop and there is a reduction that could 5749 // benefit from interleaving. 5750 if (VF.isVector() && HasReductions) { 5751 LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n"); 5752 return IC; 5753 } 5754 5755 // For any scalar loop that either requires runtime checks or predication we 5756 // are better off leaving this to the unroller. Note that if we've already 5757 // vectorized the loop we will have done the runtime check and so interleaving 5758 // won't require further checks. 5759 bool ScalarInterleavingRequiresPredication = 5760 (VF.isScalar() && any_of(TheLoop->blocks(), [this](BasicBlock *BB) { 5761 return Legal->blockNeedsPredication(BB); 5762 })); 5763 bool ScalarInterleavingRequiresRuntimePointerCheck = 5764 (VF.isScalar() && Legal->getRuntimePointerChecking()->Need); 5765 5766 // We want to interleave small loops in order to reduce the loop overhead and 5767 // potentially expose ILP opportunities. 5768 LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n' 5769 << "LV: IC is " << IC << '\n' 5770 << "LV: VF is " << VF << '\n'); 5771 const bool AggressivelyInterleaveReductions = 5772 TTI.enableAggressiveInterleaving(HasReductions); 5773 if (!ScalarInterleavingRequiresRuntimePointerCheck && 5774 !ScalarInterleavingRequiresPredication && LoopCost < SmallLoopCost) { 5775 // We assume that the cost overhead is 1 and we use the cost model 5776 // to estimate the cost of the loop and interleave until the cost of the 5777 // loop overhead is about 5% of the cost of the loop. 5778 unsigned SmallIC = 5779 std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost)); 5780 5781 // Interleave until store/load ports (estimated by max interleave count) are 5782 // saturated. 
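// Worked example (editorial addition for exposition; all numbers below are
// hypothetical): if SmallLoopCost were 20 and LoopCost were 3, then
// PowerOf2Floor(20 / 3) == 4 and SmallIC == min(IC, 4). If the loop further
// had 2 stores and 1 load with IC == 8, then StoresIC == 8 / 2 == 4 and
// LoadsIC == 8 / 1 == 8, so the port-saturation check below could raise the
// final count back to 8 when EnableLoadStoreRuntimeInterleave is set.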
5783 unsigned NumStores = Legal->getNumStores();
5784 unsigned NumLoads = Legal->getNumLoads();
5785 unsigned StoresIC = IC / (NumStores ? NumStores : 1);
5786 unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
5787
5788 // There is little point in interleaving for reductions containing selects
5789 // and compares when VF=1 since it may just create more overhead than it's
5790 // worth for loops with small trip counts. This is because we still have to
5791 // do the final reduction after the loop.
5792 bool HasSelectCmpReductions =
5793 HasReductions &&
5794 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5795 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5796 return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
5797 RdxDesc.getRecurrenceKind());
5798 });
5799 if (HasSelectCmpReductions) {
5800 LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
5801 return 1;
5802 }
5803
5804 // If we have a scalar reduction (vector reductions are already dealt with
5805 // by this point), we can increase the critical path length if the loop
5806 // we're interleaving is inside another loop. For tree-wise reductions
5807 // set the limit to 2, and for ordered reductions it's best to disable
5808 // interleaving entirely.
5809 if (HasReductions && TheLoop->getLoopDepth() > 1) {
5810 bool HasOrderedReductions =
5811 any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
5812 const RecurrenceDescriptor &RdxDesc = Reduction.second;
5813 return RdxDesc.isOrdered();
5814 });
5815 if (HasOrderedReductions) {
5816 LLVM_DEBUG(
5817 dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
5818 return 1;
5819 }
5820
5821 unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
5822 SmallIC = std::min(SmallIC, F);
5823 StoresIC = std::min(StoresIC, F);
5824 LoadsIC = std::min(LoadsIC, F);
5825 }
5826
5827 if (EnableLoadStoreRuntimeInterleave &&
5828 std::max(StoresIC, LoadsIC) > SmallIC) {
5829 LLVM_DEBUG(
5830 dbgs() << "LV: Interleaving to saturate store or load ports.\n");
5831 return std::max(StoresIC, LoadsIC);
5832 }
5833
5834 // If there are scalar reductions and TTI has enabled aggressive
5835 // interleaving for reductions, we will interleave to expose ILP.
5836 if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
5837 AggressivelyInterleaveReductions) {
5838 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5839 // Interleave no less than SmallIC but not as aggressive as the normal IC
5840 // to satisfy the rare situation when resources are too limited.
5841 return std::max(IC / 2, SmallIC);
5842 } else {
5843 LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
5844 return SmallIC;
5845 }
5846 }
5847
5848 // Interleave if this is a large loop (small loops are already dealt with by
5849 // this point) that could benefit from interleaving.
5850 if (AggressivelyInterleaveReductions) {
5851 LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
5852 return IC;
5853 }
5854
5855 LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
5856 return 1;
5857 }
5858
5859 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
5860 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
5861 // This function calculates the register usage by measuring the highest number
5862 // of values that are alive at a single location. Obviously, this is a very
5863 // rough estimation. We scan the loop in topological order and
5864 // assign a number to each instruction.
We use RPO to ensure that defs are 5865 // met before their users. We assume that each instruction that has in-loop 5866 // users starts an interval. We record every time that an in-loop value is 5867 // used, so we have a list of the first and last occurrences of each 5868 // instruction. Next, we transpose this data structure into a multi map that 5869 // holds the list of intervals that *end* at a specific location. This multi 5870 // map allows us to perform a linear search. We scan the instructions linearly 5871 // and record each time that a new interval starts, by placing it in a set. 5872 // If we find this value in the multi-map then we remove it from the set. 5873 // The max register usage is the maximum size of the set. 5874 // We also search for instructions that are defined outside the loop, but are 5875 // used inside the loop. We need this number separately from the max-interval 5876 // usage number because when we unroll, loop-invariant values do not take 5877 // more register. 5878 LoopBlocksDFS DFS(TheLoop); 5879 DFS.perform(LI); 5880 5881 RegisterUsage RU; 5882 5883 // Each 'key' in the map opens a new interval. The values 5884 // of the map are the index of the 'last seen' usage of the 5885 // instruction that is the key. 5886 using IntervalMap = DenseMap<Instruction *, unsigned>; 5887 5888 // Maps instruction to its index. 5889 SmallVector<Instruction *, 64> IdxToInstr; 5890 // Marks the end of each interval. 5891 IntervalMap EndPoint; 5892 // Saves the list of instruction indices that are used in the loop. 5893 SmallPtrSet<Instruction *, 8> Ends; 5894 // Saves the list of values that are used in the loop but are 5895 // defined outside the loop, such as arguments and constants. 5896 SmallPtrSet<Value *, 8> LoopInvariants; 5897 5898 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 5899 for (Instruction &I : BB->instructionsWithoutDebug()) { 5900 IdxToInstr.push_back(&I); 5901 5902 // Save the end location of each USE. 5903 for (Value *U : I.operands()) { 5904 auto *Instr = dyn_cast<Instruction>(U); 5905 5906 // Ignore non-instruction values such as arguments, constants, etc. 5907 if (!Instr) 5908 continue; 5909 5910 // If this instruction is outside the loop then record it and continue. 5911 if (!TheLoop->contains(Instr)) { 5912 LoopInvariants.insert(Instr); 5913 continue; 5914 } 5915 5916 // Overwrite previous end points. 5917 EndPoint[Instr] = IdxToInstr.size(); 5918 Ends.insert(Instr); 5919 } 5920 } 5921 } 5922 5923 // Saves the list of intervals that end with the index in 'key'. 5924 using InstrList = SmallVector<Instruction *, 2>; 5925 DenseMap<unsigned, InstrList> TransposeEnds; 5926 5927 // Transpose the EndPoints to a list of values that end at each index. 
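// Illustrative example (editorial addition; values are hypothetical): if
// EndPoint == { %a -> 3, %b -> 3, %c -> 7 }, the transposed map becomes
// TransposeEnds == { 3 -> [%a, %b], 7 -> [%c] }, i.e. the intervals of %a
// and %b both end at instruction index 3 and the interval of %c ends at 7.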
5928 for (auto &Interval : EndPoint) 5929 TransposeEnds[Interval.second].push_back(Interval.first); 5930 5931 SmallPtrSet<Instruction *, 8> OpenIntervals; 5932 SmallVector<RegisterUsage, 8> RUs(VFs.size()); 5933 SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size()); 5934 5935 LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n"); 5936 5937 const auto &TTICapture = TTI; 5938 auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned { 5939 if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty)) 5940 return 0; 5941 return TTICapture.getRegUsageForType(VectorType::get(Ty, VF)); 5942 }; 5943 5944 for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) { 5945 Instruction *I = IdxToInstr[i]; 5946 5947 // Remove all of the instructions that end at this location. 5948 InstrList &List = TransposeEnds[i]; 5949 for (Instruction *ToRemove : List) 5950 OpenIntervals.erase(ToRemove); 5951 5952 // Ignore instructions that are never used within the loop. 5953 if (!Ends.count(I)) 5954 continue; 5955 5956 // Skip ignored values. 5957 if (ValuesToIgnore.count(I)) 5958 continue; 5959 5960 // For each VF find the maximum usage of registers. 5961 for (unsigned j = 0, e = VFs.size(); j < e; ++j) { 5962 // Count the number of live intervals. 5963 SmallMapVector<unsigned, unsigned, 4> RegUsage; 5964 5965 if (VFs[j].isScalar()) { 5966 for (auto Inst : OpenIntervals) { 5967 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5968 if (RegUsage.find(ClassID) == RegUsage.end()) 5969 RegUsage[ClassID] = 1; 5970 else 5971 RegUsage[ClassID] += 1; 5972 } 5973 } else { 5974 collectUniformsAndScalars(VFs[j]); 5975 for (auto Inst : OpenIntervals) { 5976 // Skip ignored values for VF > 1. 5977 if (VecValuesToIgnore.count(Inst)) 5978 continue; 5979 if (isScalarAfterVectorization(Inst, VFs[j])) { 5980 unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType()); 5981 if (RegUsage.find(ClassID) == RegUsage.end()) 5982 RegUsage[ClassID] = 1; 5983 else 5984 RegUsage[ClassID] += 1; 5985 } else { 5986 unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType()); 5987 if (RegUsage.find(ClassID) == RegUsage.end()) 5988 RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]); 5989 else 5990 RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]); 5991 } 5992 } 5993 } 5994 5995 for (auto& pair : RegUsage) { 5996 if (MaxUsages[j].find(pair.first) != MaxUsages[j].end()) 5997 MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second); 5998 else 5999 MaxUsages[j][pair.first] = pair.second; 6000 } 6001 } 6002 6003 LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # " 6004 << OpenIntervals.size() << '\n'); 6005 6006 // Add the current instruction to the list of open intervals. 6007 OpenIntervals.insert(I); 6008 } 6009 6010 for (unsigned i = 0, e = VFs.size(); i < e; ++i) { 6011 SmallMapVector<unsigned, unsigned, 4> Invariant; 6012 6013 for (auto Inst : LoopInvariants) { 6014 unsigned Usage = 6015 VFs[i].isScalar() ? 
1 : GetRegUsage(Inst->getType(), VFs[i]);
6016 unsigned ClassID =
6017 TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6018 if (Invariant.find(ClassID) == Invariant.end())
6019 Invariant[ClassID] = Usage;
6020 else
6021 Invariant[ClassID] += Usage;
6022 }
6023
6024 LLVM_DEBUG({
6025 dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6026 dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6027 << " item\n";
6028 for (const auto &pair : MaxUsages[i]) {
6029 dbgs() << "LV(REG): RegisterClass: "
6030 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6031 << " registers\n";
6032 }
6033 dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6034 << " item\n";
6035 for (const auto &pair : Invariant) {
6036 dbgs() << "LV(REG): RegisterClass: "
6037 << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6038 << " registers\n";
6039 }
6040 });
6041
6042 RU.LoopInvariantRegs = Invariant;
6043 RU.MaxLocalUsers = MaxUsages[i];
6044 RUs[i] = RU;
6045 }
6046
6047 return RUs;
6048 }
6049
6050 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I,
6051 ElementCount VF) {
6052 // TODO: Cost model for emulated masked load/store is completely
6053 // broken. This hack guides the cost model to use an artificially
6054 // high enough value to practically disable vectorization with such
6055 // operations, except where the previously deployed legality hack allowed
6056 // using very low cost values. This is to avoid regressions coming simply
6057 // from moving the "masked load/store" check from legality to the cost model.
6058 // Masked Load/Gather emulation was previously never allowed.
6059 // Only limited Masked Store/Scatter emulation was allowed.
6060 assert((isPredicatedInst(I, VF) || Legal->isUniformMemOp(*I)) &&
6061 "Expecting a scalar emulated instruction");
6062 return isa<LoadInst>(I) ||
6063 (isa<StoreInst>(I) &&
6064 NumPredStores > NumberOfStoresToPredicate);
6065 }
6066
6067 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6068 // If we aren't vectorizing the loop, or if we've already collected the
6069 // instructions to scalarize, there's nothing to do. Collection may already
6070 // have occurred if we have a user-selected VF and are now computing the
6071 // expected cost for interleaving.
6072 if (VF.isScalar() || VF.isZero() ||
6073 InstsToScalarize.find(VF) != InstsToScalarize.end())
6074 return;
6075
6076 // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6077 // not profitable to scalarize any instructions, the presence of VF in the
6078 // map will indicate that we've analyzed it already.
6079 ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6080
6081 PredicatedBBsAfterVectorization[VF].clear();
6082
6083 // Find all the instructions that are scalar with predication in the loop and
6084 // determine if it would be better to not if-convert the blocks they are in.
6085 // If so, we also record the instructions to scalarize.
6086 for (BasicBlock *BB : TheLoop->blocks()) {
6087 if (!blockNeedsPredicationForAnyReason(BB))
6088 continue;
6089 for (Instruction &I : *BB)
6090 if (isScalarWithPredication(&I, VF)) {
6091 ScalarCostsTy ScalarCosts;
6092 // Do not apply discount if scalable, because that would lead to
6093 // invalid scalarization costs.
6094 // Do not apply discount logic if hacked cost is needed
6095 // for emulated masked memrefs.
6096 if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I, VF) && 6097 computePredInstDiscount(&I, ScalarCosts, VF) >= 0) 6098 ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end()); 6099 // Remember that BB will remain after vectorization. 6100 PredicatedBBsAfterVectorization[VF].insert(BB); 6101 } 6102 } 6103 } 6104 6105 int LoopVectorizationCostModel::computePredInstDiscount( 6106 Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) { 6107 assert(!isUniformAfterVectorization(PredInst, VF) && 6108 "Instruction marked uniform-after-vectorization will be predicated"); 6109 6110 // Initialize the discount to zero, meaning that the scalar version and the 6111 // vector version cost the same. 6112 InstructionCost Discount = 0; 6113 6114 // Holds instructions to analyze. The instructions we visit are mapped in 6115 // ScalarCosts. Those instructions are the ones that would be scalarized if 6116 // we find that the scalar version costs less. 6117 SmallVector<Instruction *, 8> Worklist; 6118 6119 // Returns true if the given instruction can be scalarized. 6120 auto canBeScalarized = [&](Instruction *I) -> bool { 6121 // We only attempt to scalarize instructions forming a single-use chain 6122 // from the original predicated block that would otherwise be vectorized. 6123 // Although not strictly necessary, we give up on instructions we know will 6124 // already be scalar to avoid traversing chains that are unlikely to be 6125 // beneficial. 6126 if (!I->hasOneUse() || PredInst->getParent() != I->getParent() || 6127 isScalarAfterVectorization(I, VF)) 6128 return false; 6129 6130 // If the instruction is scalar with predication, it will be analyzed 6131 // separately. We ignore it within the context of PredInst. 6132 if (isScalarWithPredication(I, VF)) 6133 return false; 6134 6135 // If any of the instruction's operands are uniform after vectorization, 6136 // the instruction cannot be scalarized. This prevents, for example, a 6137 // masked load from being scalarized. 6138 // 6139 // We assume we will only emit a value for lane zero of an instruction 6140 // marked uniform after vectorization, rather than VF identical values. 6141 // Thus, if we scalarize an instruction that uses a uniform, we would 6142 // create uses of values corresponding to the lanes we aren't emitting code 6143 // for. This behavior can be changed by allowing getScalarValue to clone 6144 // the lane zero values for uniforms rather than asserting. 6145 for (Use &U : I->operands()) 6146 if (auto *J = dyn_cast<Instruction>(U.get())) 6147 if (isUniformAfterVectorization(J, VF)) 6148 return false; 6149 6150 // Otherwise, we can scalarize the instruction. 6151 return true; 6152 }; 6153 6154 // Compute the expected cost discount from scalarizing the entire expression 6155 // feeding the predicated instruction. We currently only consider expressions 6156 // that are single-use instruction chains. 6157 Worklist.push_back(PredInst); 6158 while (!Worklist.empty()) { 6159 Instruction *I = Worklist.pop_back_val(); 6160 6161 // If we've already analyzed the instruction, there's nothing to do. 6162 if (ScalarCosts.find(I) != ScalarCosts.end()) 6163 continue; 6164 6165 // Compute the cost of the vector instruction. Note that this cost already 6166 // includes the scalarization overhead of the predicated instruction. 6167 InstructionCost VectorCost = getInstructionCost(I, VF).first; 6168 6169 // Compute the cost of the scalarized instruction. 
This cost is the cost of 6170 // the instruction as if it wasn't if-converted and instead remained in the 6171 // predicated block. We will scale this cost by block probability after 6172 // computing the scalarization overhead. 6173 InstructionCost ScalarCost = 6174 VF.getFixedValue() * 6175 getInstructionCost(I, ElementCount::getFixed(1)).first; 6176 6177 // Compute the scalarization overhead of needed insertelement instructions 6178 // and phi nodes. 6179 if (isScalarWithPredication(I, VF) && !I->getType()->isVoidTy()) { 6180 ScalarCost += TTI.getScalarizationOverhead( 6181 cast<VectorType>(ToVectorTy(I->getType(), VF)), 6182 APInt::getAllOnes(VF.getFixedValue()), true, false); 6183 ScalarCost += 6184 VF.getFixedValue() * 6185 TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput); 6186 } 6187 6188 // Compute the scalarization overhead of needed extractelement 6189 // instructions. For each of the instruction's operands, if the operand can 6190 // be scalarized, add it to the worklist; otherwise, account for the 6191 // overhead. 6192 for (Use &U : I->operands()) 6193 if (auto *J = dyn_cast<Instruction>(U.get())) { 6194 assert(VectorType::isValidElementType(J->getType()) && 6195 "Instruction has non-scalar type"); 6196 if (canBeScalarized(J)) 6197 Worklist.push_back(J); 6198 else if (needsExtract(J, VF)) { 6199 ScalarCost += TTI.getScalarizationOverhead( 6200 cast<VectorType>(ToVectorTy(J->getType(), VF)), 6201 APInt::getAllOnes(VF.getFixedValue()), false, true); 6202 } 6203 } 6204 6205 // Scale the total scalar cost by block probability. 6206 ScalarCost /= getReciprocalPredBlockProb(); 6207 6208 // Compute the discount. A non-negative discount means the vector version 6209 // of the instruction costs more, and scalarizing would be beneficial. 6210 Discount += VectorCost - ScalarCost; 6211 ScalarCosts[I] = ScalarCost; 6212 } 6213 6214 return *Discount.getValue(); 6215 } 6216 6217 LoopVectorizationCostModel::VectorizationCostTy 6218 LoopVectorizationCostModel::expectedCost( 6219 ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) { 6220 VectorizationCostTy Cost; 6221 6222 // For each block. 6223 for (BasicBlock *BB : TheLoop->blocks()) { 6224 VectorizationCostTy BlockCost; 6225 6226 // For each instruction in the old loop. 6227 for (Instruction &I : BB->instructionsWithoutDebug()) { 6228 // Skip ignored values. 6229 if (ValuesToIgnore.count(&I) || 6230 (VF.isVector() && VecValuesToIgnore.count(&I))) 6231 continue; 6232 6233 VectorizationCostTy C = getInstructionCost(&I, VF); 6234 6235 // Check if we should override the cost. 6236 if (C.first.isValid() && 6237 ForceTargetInstructionCost.getNumOccurrences() > 0) 6238 C.first = InstructionCost(ForceTargetInstructionCost); 6239 6240 // Keep a list of instructions with invalid costs. 6241 if (Invalid && !C.first.isValid()) 6242 Invalid->emplace_back(&I, VF); 6243 6244 BlockCost.first += C.first; 6245 BlockCost.second |= C.second; 6246 LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first 6247 << " for VF " << VF << " For instruction: " << I 6248 << '\n'); 6249 } 6250 6251 // If we are vectorizing a predicated block, it will have been 6252 // if-converted. This means that the block's instructions (aside from 6253 // stores and instructions that may divide by zero) will now be 6254 // unconditionally executed. For the scalar case, we may not always execute 6255 // the predicated block, if it is an if-else block. Thus, scale the block's 6256 // cost by the probability of executing it. 
blockNeedsPredication from 6257 // Legal is used so as to not include all blocks in tail folded loops. 6258 if (VF.isScalar() && Legal->blockNeedsPredication(BB)) 6259 BlockCost.first /= getReciprocalPredBlockProb(); 6260 6261 Cost.first += BlockCost.first; 6262 Cost.second |= BlockCost.second; 6263 } 6264 6265 return Cost; 6266 } 6267 6268 /// Gets Address Access SCEV after verifying that the access pattern 6269 /// is loop invariant except the induction variable dependence. 6270 /// 6271 /// This SCEV can be sent to the Target in order to estimate the address 6272 /// calculation cost. 6273 static const SCEV *getAddressAccessSCEV( 6274 Value *Ptr, 6275 LoopVectorizationLegality *Legal, 6276 PredicatedScalarEvolution &PSE, 6277 const Loop *TheLoop) { 6278 6279 auto *Gep = dyn_cast<GetElementPtrInst>(Ptr); 6280 if (!Gep) 6281 return nullptr; 6282 6283 // We are looking for a gep with all loop invariant indices except for one 6284 // which should be an induction variable. 6285 auto SE = PSE.getSE(); 6286 unsigned NumOperands = Gep->getNumOperands(); 6287 for (unsigned i = 1; i < NumOperands; ++i) { 6288 Value *Opd = Gep->getOperand(i); 6289 if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) && 6290 !Legal->isInductionVariable(Opd)) 6291 return nullptr; 6292 } 6293 6294 // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV. 6295 return PSE.getSCEV(Ptr); 6296 } 6297 6298 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) { 6299 return Legal->hasStride(I->getOperand(0)) || 6300 Legal->hasStride(I->getOperand(1)); 6301 } 6302 6303 InstructionCost 6304 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I, 6305 ElementCount VF) { 6306 assert(VF.isVector() && 6307 "Scalarization cost of instruction implies vectorization."); 6308 if (VF.isScalable()) 6309 return InstructionCost::getInvalid(); 6310 6311 Type *ValTy = getLoadStoreType(I); 6312 auto SE = PSE.getSE(); 6313 6314 unsigned AS = getLoadStoreAddressSpace(I); 6315 Value *Ptr = getLoadStorePointerOperand(I); 6316 Type *PtrTy = ToVectorTy(Ptr->getType(), VF); 6317 // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost` 6318 // that it is being called from this specific place. 6319 6320 // Figure out whether the access is strided and get the stride value 6321 // if it's known in compile time 6322 const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop); 6323 6324 // Get the cost of the scalar memory instruction and address computation. 6325 InstructionCost Cost = 6326 VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV); 6327 6328 // Don't pass *I here, since it is scalar but will actually be part of a 6329 // vectorized loop where the user of it is a vectorized instruction. 6330 const Align Alignment = getLoadStoreAlignment(I); 6331 Cost += VF.getKnownMinValue() * 6332 TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment, 6333 AS, TTI::TCK_RecipThroughput); 6334 6335 // Get the overhead of the extractelement and insertelement instructions 6336 // we might create due to scalarization. 6337 Cost += getScalarizationOverhead(I, VF); 6338 6339 // If we have a predicated load/store, it will need extra i1 extracts and 6340 // conditional branches, but may not be executed for each vector lane. Scale 6341 // the cost by the probability of executing the predicated block. 
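// Illustrative note (editorial addition; assumes getReciprocalPredBlockProb()
// returns 2, i.e. the predicated block is modelled as executing on every
// other iteration): a scalarized cost of, say, 10 would be scaled down to 5
// before the i1-extract and branch costs below are added back in.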
6342 if (isPredicatedInst(I, VF)) { 6343 Cost /= getReciprocalPredBlockProb(); 6344 6345 // Add the cost of an i1 extract and a branch 6346 auto *Vec_i1Ty = 6347 VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF); 6348 Cost += TTI.getScalarizationOverhead( 6349 Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()), 6350 /*Insert=*/false, /*Extract=*/true); 6351 Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput); 6352 6353 if (useEmulatedMaskMemRefHack(I, VF)) 6354 // Artificially setting to a high enough value to practically disable 6355 // vectorization with such operations. 6356 Cost = 3000000; 6357 } 6358 6359 return Cost; 6360 } 6361 6362 InstructionCost 6363 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I, 6364 ElementCount VF) { 6365 Type *ValTy = getLoadStoreType(I); 6366 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6367 Value *Ptr = getLoadStorePointerOperand(I); 6368 unsigned AS = getLoadStoreAddressSpace(I); 6369 int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr); 6370 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6371 6372 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6373 "Stride should be 1 or -1 for consecutive memory access"); 6374 const Align Alignment = getLoadStoreAlignment(I); 6375 InstructionCost Cost = 0; 6376 if (Legal->isMaskRequired(I)) 6377 Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6378 CostKind); 6379 else 6380 Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS, 6381 CostKind, I); 6382 6383 bool Reverse = ConsecutiveStride < 0; 6384 if (Reverse) 6385 Cost += 6386 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0); 6387 return Cost; 6388 } 6389 6390 InstructionCost 6391 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I, 6392 ElementCount VF) { 6393 assert(Legal->isUniformMemOp(*I)); 6394 6395 Type *ValTy = getLoadStoreType(I); 6396 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF)); 6397 const Align Alignment = getLoadStoreAlignment(I); 6398 unsigned AS = getLoadStoreAddressSpace(I); 6399 enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 6400 if (isa<LoadInst>(I)) { 6401 return TTI.getAddressComputationCost(ValTy) + 6402 TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS, 6403 CostKind) + 6404 TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy); 6405 } 6406 StoreInst *SI = cast<StoreInst>(I); 6407 6408 bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand()); 6409 return TTI.getAddressComputationCost(ValTy) + 6410 TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS, 6411 CostKind) + 6412 (isLoopInvariantStoreValue 6413 ? 
0
6414 : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6415 VF.getKnownMinValue() - 1));
6416 }
6417
6418 InstructionCost
6419 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6420 ElementCount VF) {
6421 Type *ValTy = getLoadStoreType(I);
6422 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6423 const Align Alignment = getLoadStoreAlignment(I);
6424 const Value *Ptr = getLoadStorePointerOperand(I);
6425
6426 return TTI.getAddressComputationCost(VectorTy) +
6427 TTI.getGatherScatterOpCost(
6428 I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6429 TargetTransformInfo::TCK_RecipThroughput, I);
6430 }
6431
6432 InstructionCost
6433 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6434 ElementCount VF) {
6435 // TODO: Once we have support for interleaving with scalable vectors
6436 // we can calculate the cost properly here.
6437 if (VF.isScalable())
6438 return InstructionCost::getInvalid();
6439
6440 Type *ValTy = getLoadStoreType(I);
6441 auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6442 unsigned AS = getLoadStoreAddressSpace(I);
6443
6444 auto Group = getInterleavedAccessGroup(I);
6445 assert(Group && "Fail to get an interleaved access group.");
6446
6447 unsigned InterleaveFactor = Group->getFactor();
6448 auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6449
6450 // Holds the indices of existing members in the interleaved group.
6451 SmallVector<unsigned, 4> Indices;
6452 for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6453 if (Group->getMember(IF))
6454 Indices.push_back(IF);
6455
6456 // Calculate the cost of the whole interleaved group.
6457 bool UseMaskForGaps =
6458 (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6459 (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6460 InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6461 I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6462 AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6463
6464 if (Group->isReverse()) {
6465 // TODO: Add support for reversed masked interleaved access.
6466 assert(!Legal->isMaskRequired(I) &&
6467 "Reverse masked interleaved access not supported.");
6468 Cost +=
6469 Group->getNumMembers() *
6470 TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6471 }
6472 return Cost;
6473 }
6474
6475 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6476 Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6477 using namespace llvm::PatternMatch;
6478 // Early exit for no in-loop reductions.
6479 if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6480 return None;
6481 auto *VectorTy = cast<VectorType>(Ty);
6482
6483 // We are looking for a pattern of, and finding the minimal acceptable cost:
6484 // reduce(mul(ext(A), ext(B))) or
6485 // reduce(mul(A, B)) or
6486 // reduce(ext(A)) or
6487 // reduce(A).
6488 // The basic idea is that we walk down the tree to do that, finding the root
6489 // reduction instruction in InLoopReductionImmediateChains. From there we find
6490 // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6491 // of the components. If the reduction cost is lower, then we return it for the
6492 // reduction instruction and 0 for the other instructions in the pattern. If
6493 // it is not, we return an invalid cost specifying that the original cost method
6494 // should be used.
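// Illustrative example (editorial addition; the types and target behaviour
// are hypothetical): for an i32 add reduction fed by
// mul(sext(i16 A), sext(i16 B)), the whole ext/mul/add pattern may be costed
// as one extended multiply-add reduction via getExtendedAddReductionCost; if
// that beats the sum of the separate ext, mul and plain reduction costs, the
// reduction instruction receives the cheaper cost and the ext/mul members
// receive a cost of 0.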
6495 Instruction *RetI = I; 6496 if (match(RetI, m_ZExtOrSExt(m_Value()))) { 6497 if (!RetI->hasOneUser()) 6498 return None; 6499 RetI = RetI->user_back(); 6500 } 6501 if (match(RetI, m_Mul(m_Value(), m_Value())) && 6502 RetI->user_back()->getOpcode() == Instruction::Add) { 6503 if (!RetI->hasOneUser()) 6504 return None; 6505 RetI = RetI->user_back(); 6506 } 6507 6508 // Test if the found instruction is a reduction, and if not return an invalid 6509 // cost specifying the parent to use the original cost modelling. 6510 if (!InLoopReductionImmediateChains.count(RetI)) 6511 return None; 6512 6513 // Find the reduction this chain is a part of and calculate the basic cost of 6514 // the reduction on its own. 6515 Instruction *LastChain = InLoopReductionImmediateChains[RetI]; 6516 Instruction *ReductionPhi = LastChain; 6517 while (!isa<PHINode>(ReductionPhi)) 6518 ReductionPhi = InLoopReductionImmediateChains[ReductionPhi]; 6519 6520 const RecurrenceDescriptor &RdxDesc = 6521 Legal->getReductionVars().find(cast<PHINode>(ReductionPhi))->second; 6522 6523 InstructionCost BaseCost = TTI.getArithmeticReductionCost( 6524 RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind); 6525 6526 // For a call to the llvm.fmuladd intrinsic we need to add the cost of a 6527 // normal fmul instruction to the cost of the fadd reduction. 6528 if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd) 6529 BaseCost += 6530 TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind); 6531 6532 // If we're using ordered reductions then we can just return the base cost 6533 // here, since getArithmeticReductionCost calculates the full ordered 6534 // reduction cost when FP reassociation is not allowed. 6535 if (useOrderedReductions(RdxDesc)) 6536 return BaseCost; 6537 6538 // Get the operand that was not the reduction chain and match it to one of the 6539 // patterns, returning the better cost if it is found. 6540 Instruction *RedOp = RetI->getOperand(1) == LastChain 6541 ? dyn_cast<Instruction>(RetI->getOperand(0)) 6542 : dyn_cast<Instruction>(RetI->getOperand(1)); 6543 6544 VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy); 6545 6546 Instruction *Op0, *Op1; 6547 if (RedOp && 6548 match(RedOp, 6549 m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) && 6550 match(Op0, m_ZExtOrSExt(m_Value())) && 6551 Op0->getOpcode() == Op1->getOpcode() && 6552 Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() && 6553 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) && 6554 (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) { 6555 6556 // Matched reduce(ext(mul(ext(A), ext(B))) 6557 // Note that the extend opcodes need to all match, or if A==B they will have 6558 // been converted to zext(mul(sext(A), sext(A))) as it is known positive, 6559 // which is equally fine. 
6560 bool IsUnsigned = isa<ZExtInst>(Op0); 6561 auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy); 6562 auto *MulType = VectorType::get(Op0->getType(), VectorTy); 6563 6564 InstructionCost ExtCost = 6565 TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType, 6566 TTI::CastContextHint::None, CostKind, Op0); 6567 InstructionCost MulCost = 6568 TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind); 6569 InstructionCost Ext2Cost = 6570 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType, 6571 TTI::CastContextHint::None, CostKind, RedOp); 6572 6573 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6574 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6575 CostKind); 6576 6577 if (RedCost.isValid() && 6578 RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost) 6579 return I == RetI ? RedCost : 0; 6580 } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) && 6581 !TheLoop->isLoopInvariant(RedOp)) { 6582 // Matched reduce(ext(A)) 6583 bool IsUnsigned = isa<ZExtInst>(RedOp); 6584 auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy); 6585 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6586 /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6587 CostKind); 6588 6589 InstructionCost ExtCost = 6590 TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType, 6591 TTI::CastContextHint::None, CostKind, RedOp); 6592 if (RedCost.isValid() && RedCost < BaseCost + ExtCost) 6593 return I == RetI ? RedCost : 0; 6594 } else if (RedOp && 6595 match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) { 6596 if (match(Op0, m_ZExtOrSExt(m_Value())) && 6597 Op0->getOpcode() == Op1->getOpcode() && 6598 !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) { 6599 bool IsUnsigned = isa<ZExtInst>(Op0); 6600 Type *Op0Ty = Op0->getOperand(0)->getType(); 6601 Type *Op1Ty = Op1->getOperand(0)->getType(); 6602 Type *LargestOpTy = 6603 Op0Ty->getIntegerBitWidth() < Op1Ty->getIntegerBitWidth() ? Op1Ty 6604 : Op0Ty; 6605 auto *ExtType = VectorType::get(LargestOpTy, VectorTy); 6606 6607 // Matched reduce(mul(ext(A), ext(B))), where the two ext may be of 6608 // different sizes. We take the largest type as the ext to reduce, and add 6609 // the remaining cost as, for example reduce(mul(ext(ext(A)), ext(B))). 6610 InstructionCost ExtCost0 = TTI.getCastInstrCost( 6611 Op0->getOpcode(), VectorTy, VectorType::get(Op0Ty, VectorTy), 6612 TTI::CastContextHint::None, CostKind, Op0); 6613 InstructionCost ExtCost1 = TTI.getCastInstrCost( 6614 Op1->getOpcode(), VectorTy, VectorType::get(Op1Ty, VectorTy), 6615 TTI::CastContextHint::None, CostKind, Op1); 6616 InstructionCost MulCost = 6617 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6618 6619 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6620 /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType, 6621 CostKind); 6622 InstructionCost ExtraExtCost = 0; 6623 if (Op0Ty != LargestOpTy || Op1Ty != LargestOpTy) { 6624 Instruction *ExtraExtOp = (Op0Ty != LargestOpTy) ? Op0 : Op1; 6625 ExtraExtCost = TTI.getCastInstrCost( 6626 ExtraExtOp->getOpcode(), ExtType, 6627 VectorType::get(ExtraExtOp->getOperand(0)->getType(), VectorTy), 6628 TTI::CastContextHint::None, CostKind, ExtraExtOp); 6629 } 6630 6631 if (RedCost.isValid() && 6632 (RedCost + ExtraExtCost) < (ExtCost0 + ExtCost1 + MulCost + BaseCost)) 6633 return I == RetI ? 
RedCost : 0; 6634 } else if (!match(I, m_ZExtOrSExt(m_Value()))) { 6635 // Matched reduce(mul()) 6636 InstructionCost MulCost = 6637 TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 6638 6639 InstructionCost RedCost = TTI.getExtendedAddReductionCost( 6640 /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy, 6641 CostKind); 6642 6643 if (RedCost.isValid() && RedCost < MulCost + BaseCost) 6644 return I == RetI ? RedCost : 0; 6645 } 6646 } 6647 6648 return I == RetI ? Optional<InstructionCost>(BaseCost) : None; 6649 } 6650 6651 InstructionCost 6652 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I, 6653 ElementCount VF) { 6654 // Calculate scalar cost only. Vectorization cost should be ready at this 6655 // moment. 6656 if (VF.isScalar()) { 6657 Type *ValTy = getLoadStoreType(I); 6658 const Align Alignment = getLoadStoreAlignment(I); 6659 unsigned AS = getLoadStoreAddressSpace(I); 6660 6661 return TTI.getAddressComputationCost(ValTy) + 6662 TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS, 6663 TTI::TCK_RecipThroughput, I); 6664 } 6665 return getWideningCost(I, VF); 6666 } 6667 6668 LoopVectorizationCostModel::VectorizationCostTy 6669 LoopVectorizationCostModel::getInstructionCost(Instruction *I, 6670 ElementCount VF) { 6671 // If we know that this instruction will remain uniform, check the cost of 6672 // the scalar version. 6673 if (isUniformAfterVectorization(I, VF)) 6674 VF = ElementCount::getFixed(1); 6675 6676 if (VF.isVector() && isProfitableToScalarize(I, VF)) 6677 return VectorizationCostTy(InstsToScalarize[VF][I], false); 6678 6679 // Forced scalars do not have any scalarization overhead. 6680 auto ForcedScalar = ForcedScalars.find(VF); 6681 if (VF.isVector() && ForcedScalar != ForcedScalars.end()) { 6682 auto InstSet = ForcedScalar->second; 6683 if (InstSet.count(I)) 6684 return VectorizationCostTy( 6685 (getInstructionCost(I, ElementCount::getFixed(1)).first * 6686 VF.getKnownMinValue()), 6687 false); 6688 } 6689 6690 Type *VectorTy; 6691 InstructionCost C = getInstructionCost(I, VF, VectorTy); 6692 6693 bool TypeNotScalarized = false; 6694 if (VF.isVector() && VectorTy->isVectorTy()) { 6695 if (unsigned NumParts = TTI.getNumberOfParts(VectorTy)) { 6696 if (VF.isScalable()) 6697 // <vscale x 1 x iN> is assumed to be profitable over iN because 6698 // scalable registers are a distinct register class from scalar ones. 6699 // If we ever find a target which wants to lower scalable vectors 6700 // back to scalars, we'll need to update this code to explicitly 6701 // ask TTI about the register class uses for each part. 6702 TypeNotScalarized = NumParts <= VF.getKnownMinValue(); 6703 else 6704 TypeNotScalarized = NumParts < VF.getKnownMinValue(); 6705 } else 6706 C = InstructionCost::getInvalid(); 6707 } 6708 return VectorizationCostTy(C, TypeNotScalarized); 6709 } 6710 6711 InstructionCost 6712 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I, 6713 ElementCount VF) const { 6714 6715 // There is no mechanism yet to create a scalable scalarization loop, 6716 // so this is currently Invalid. 
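// Illustrative note (editorial addition; the VF below is hypothetical): for a
// fixed VF of 4, scalarizing an instruction with a vector result typically
// requires 4 insertelement operations to rebuild the vector, and each operand
// that is only available as a vector requires 4 extractelement operations;
// those insert/extract costs are what the TTI scalarization-overhead queries
// below account for.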
6717 if (VF.isScalable()) 6718 return InstructionCost::getInvalid(); 6719 6720 if (VF.isScalar()) 6721 return 0; 6722 6723 InstructionCost Cost = 0; 6724 Type *RetTy = ToVectorTy(I->getType(), VF); 6725 if (!RetTy->isVoidTy() && 6726 (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore())) 6727 Cost += TTI.getScalarizationOverhead( 6728 cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true, 6729 false); 6730 6731 // Some targets keep addresses scalar. 6732 if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing()) 6733 return Cost; 6734 6735 // Some targets support efficient element stores. 6736 if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore()) 6737 return Cost; 6738 6739 // Collect operands to consider. 6740 CallInst *CI = dyn_cast<CallInst>(I); 6741 Instruction::op_range Ops = CI ? CI->args() : I->operands(); 6742 6743 // Skip operands that do not require extraction/scalarization and do not incur 6744 // any overhead. 6745 SmallVector<Type *> Tys; 6746 for (auto *V : filterExtractingOperands(Ops, VF)) 6747 Tys.push_back(MaybeVectorizeType(V->getType(), VF)); 6748 return Cost + TTI.getOperandsScalarizationOverhead( 6749 filterExtractingOperands(Ops, VF), Tys); 6750 } 6751 6752 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) { 6753 if (VF.isScalar()) 6754 return; 6755 NumPredStores = 0; 6756 for (BasicBlock *BB : TheLoop->blocks()) { 6757 // For each instruction in the old loop. 6758 for (Instruction &I : *BB) { 6759 Value *Ptr = getLoadStorePointerOperand(&I); 6760 if (!Ptr) 6761 continue; 6762 6763 // TODO: We should generate better code and update the cost model for 6764 // predicated uniform stores. Today they are treated as any other 6765 // predicated store (see added test cases in 6766 // invariant-store-vectorization.ll). 6767 if (isa<StoreInst>(&I) && isScalarWithPredication(&I, VF)) 6768 NumPredStores++; 6769 6770 if (Legal->isUniformMemOp(I)) { 6771 // Lowering story for uniform memory ops is currently a bit complicated. 6772 // Scalarization works for everything which isn't a store with scalable 6773 // VF. Fixed len VFs just scalarize and then DCE later; scalarization 6774 // knows how to handle uniform-per-part values (i.e. the first lane 6775 // in each unrolled VF) and can thus handle scalable loads too. For 6776 // scalable stores, we use a scatter if legal. If not, we have no way 6777 // to lower (currently) and thus have to abort vectorization. 6778 if (isa<StoreInst>(&I) && VF.isScalable()) { 6779 if (isLegalGatherOrScatter(&I, VF)) 6780 setWideningDecision(&I, VF, CM_GatherScatter, 6781 getGatherScatterCost(&I, VF)); 6782 else 6783 // Error case, abort vectorization 6784 setWideningDecision(&I, VF, CM_Scalarize, 6785 InstructionCost::getInvalid()); 6786 continue; 6787 } 6788 // Load: Scalar load + broadcast 6789 // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract 6790 // TODO: Avoid replicating loads and stores instead of relying on 6791 // instcombine to remove them. 6792 setWideningDecision(&I, VF, CM_Scalarize, 6793 getUniformMemOpCost(&I, VF)); 6794 continue; 6795 } 6796 6797 // We assume that widening is the best solution when possible. 
6798 if (memoryInstructionCanBeWidened(&I, VF)) { 6799 InstructionCost Cost = getConsecutiveMemOpCost(&I, VF); 6800 int ConsecutiveStride = Legal->isConsecutivePtr( 6801 getLoadStoreType(&I), getLoadStorePointerOperand(&I)); 6802 assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) && 6803 "Expected consecutive stride."); 6804 InstWidening Decision = 6805 ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse; 6806 setWideningDecision(&I, VF, Decision, Cost); 6807 continue; 6808 } 6809 6810 // Choose between Interleaving, Gather/Scatter or Scalarization. 6811 InstructionCost InterleaveCost = InstructionCost::getInvalid(); 6812 unsigned NumAccesses = 1; 6813 if (isAccessInterleaved(&I)) { 6814 auto Group = getInterleavedAccessGroup(&I); 6815 assert(Group && "Fail to get an interleaved access group."); 6816 6817 // Make one decision for the whole group. 6818 if (getWideningDecision(&I, VF) != CM_Unknown) 6819 continue; 6820 6821 NumAccesses = Group->getNumMembers(); 6822 if (interleavedAccessCanBeWidened(&I, VF)) 6823 InterleaveCost = getInterleaveGroupCost(&I, VF); 6824 } 6825 6826 InstructionCost GatherScatterCost = 6827 isLegalGatherOrScatter(&I, VF) 6828 ? getGatherScatterCost(&I, VF) * NumAccesses 6829 : InstructionCost::getInvalid(); 6830 6831 InstructionCost ScalarizationCost = 6832 getMemInstScalarizationCost(&I, VF) * NumAccesses; 6833 6834 // Choose better solution for the current VF, 6835 // write down this decision and use it during vectorization. 6836 InstructionCost Cost; 6837 InstWidening Decision; 6838 if (InterleaveCost <= GatherScatterCost && 6839 InterleaveCost < ScalarizationCost) { 6840 Decision = CM_Interleave; 6841 Cost = InterleaveCost; 6842 } else if (GatherScatterCost < ScalarizationCost) { 6843 Decision = CM_GatherScatter; 6844 Cost = GatherScatterCost; 6845 } else { 6846 Decision = CM_Scalarize; 6847 Cost = ScalarizationCost; 6848 } 6849 // If the instructions belongs to an interleave group, the whole group 6850 // receives the same decision. The whole group receives the cost, but 6851 // the cost will actually be assigned to one instruction. 6852 if (auto Group = getInterleavedAccessGroup(&I)) 6853 setWideningDecision(Group, VF, Decision, Cost); 6854 else 6855 setWideningDecision(&I, VF, Decision, Cost); 6856 } 6857 } 6858 6859 // Make sure that any load of address and any other address computation 6860 // remains scalar unless there is gather/scatter support. This avoids 6861 // inevitable extracts into address registers, and also has the benefit of 6862 // activating LSR more, since that pass can't optimize vectorized 6863 // addresses. 6864 if (TTI.prefersVectorizedAddressing()) 6865 return; 6866 6867 // Start with all scalar pointer uses. 6868 SmallPtrSet<Instruction *, 8> AddrDefs; 6869 for (BasicBlock *BB : TheLoop->blocks()) 6870 for (Instruction &I : *BB) { 6871 Instruction *PtrDef = 6872 dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I)); 6873 if (PtrDef && TheLoop->contains(PtrDef) && 6874 getWideningDecision(&I, VF) != CM_GatherScatter) 6875 AddrDefs.insert(PtrDef); 6876 } 6877 6878 // Add all instructions used to generate the addresses. 
6879   SmallVector<Instruction *, 4> Worklist;
6880   append_range(Worklist, AddrDefs);
6881   while (!Worklist.empty()) {
6882     Instruction *I = Worklist.pop_back_val();
6883     for (auto &Op : I->operands())
6884       if (auto *InstOp = dyn_cast<Instruction>(Op))
6885         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
6886             AddrDefs.insert(InstOp).second)
6887           Worklist.push_back(InstOp);
6888   }
6889
6890   for (auto *I : AddrDefs) {
6891     if (isa<LoadInst>(I)) {
6892       // Setting the desired widening decision should ideally be handled by
6893       // the cost functions, but since this involves finding out whether the
6894       // loaded register is involved in an address computation, it is instead
6895       // changed here when we know this is the case.
6896       InstWidening Decision = getWideningDecision(I, VF);
6897       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
6898         // Scalarize a widened load of address.
6899         setWideningDecision(
6900             I, VF, CM_Scalarize,
6901             (VF.getKnownMinValue() *
6902              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
6903       else if (auto Group = getInterleavedAccessGroup(I)) {
6904         // Scalarize an interleave group of address loads.
6905         for (unsigned I = 0; I < Group->getFactor(); ++I) {
6906           if (Instruction *Member = Group->getMember(I))
6907             setWideningDecision(
6908                 Member, VF, CM_Scalarize,
6909                 (VF.getKnownMinValue() *
6910                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
6911         }
6912       }
6913     } else
6914       // Make sure I gets scalarized and a cost estimate without
6915       // scalarization overhead.
6916       ForcedScalars[VF].insert(I);
6917   }
6918 }
6919
6920 InstructionCost
6921 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
6922                                                Type *&VectorTy) {
6923   Type *RetTy = I->getType();
6924   if (canTruncateToMinimalBitwidth(I, VF))
6925     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
6926   auto SE = PSE.getSE();
6927   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6928
6929   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
6930                                                 ElementCount VF) -> bool {
6931     if (VF.isScalar())
6932       return true;
6933
6934     auto Scalarized = InstsToScalarize.find(VF);
6935     assert(Scalarized != InstsToScalarize.end() &&
6936            "VF not yet analyzed for scalarization profitability");
6937     return !Scalarized->second.count(I) &&
6938            llvm::all_of(I->users(), [&](User *U) {
6939              auto *UI = cast<Instruction>(U);
6940              return !Scalarized->second.count(UI);
6941            });
6942   };
6943   (void) hasSingleCopyAfterVectorization;
6944
6945   if (isScalarAfterVectorization(I, VF)) {
6946     // With the exception of GEPs and PHIs, after scalarization there should
6947     // only be one copy of the instruction generated in the loop. This is
6948     // because the VF is either 1, or any instructions that need scalarizing
6949     // have already been dealt with by the time we get here. As a result,
6950     // we don't have to multiply the instruction cost by VF.
6951     assert(I->getOpcode() == Instruction::GetElementPtr ||
6952            I->getOpcode() == Instruction::PHI ||
6953            (I->getOpcode() == Instruction::BitCast &&
6954             I->getType()->isPointerTy()) ||
6955            hasSingleCopyAfterVectorization(I, VF));
6956     VectorTy = RetTy;
6957   } else
6958     VectorTy = ToVectorTy(RetTy, VF);
6959
6960   // TODO: We need to estimate the cost of intrinsic calls.
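  // The switch below returns a per-opcode estimate in terms of the selected
  // cost kind (reciprocal throughput). Opcodes whose cost is already folded
  // into another instruction's estimate (e.g. GEPs feeding memory ops)
  // deliberately return 0.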
6961 switch (I->getOpcode()) { 6962 case Instruction::GetElementPtr: 6963 // We mark this instruction as zero-cost because the cost of GEPs in 6964 // vectorized code depends on whether the corresponding memory instruction 6965 // is scalarized or not. Therefore, we handle GEPs with the memory 6966 // instruction cost. 6967 return 0; 6968 case Instruction::Br: { 6969 // In cases of scalarized and predicated instructions, there will be VF 6970 // predicated blocks in the vectorized loop. Each branch around these 6971 // blocks requires also an extract of its vector compare i1 element. 6972 bool ScalarPredicatedBB = false; 6973 BranchInst *BI = cast<BranchInst>(I); 6974 if (VF.isVector() && BI->isConditional() && 6975 (PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(0)) || 6976 PredicatedBBsAfterVectorization[VF].count(BI->getSuccessor(1)))) 6977 ScalarPredicatedBB = true; 6978 6979 if (ScalarPredicatedBB) { 6980 // Not possible to scalarize scalable vector with predicated instructions. 6981 if (VF.isScalable()) 6982 return InstructionCost::getInvalid(); 6983 // Return cost for branches around scalarized and predicated blocks. 6984 auto *Vec_i1Ty = 6985 VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF); 6986 return ( 6987 TTI.getScalarizationOverhead( 6988 Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) + 6989 (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue())); 6990 } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar()) 6991 // The back-edge branch will remain, as will all scalar branches. 6992 return TTI.getCFInstrCost(Instruction::Br, CostKind); 6993 else 6994 // This branch will be eliminated by if-conversion. 6995 return 0; 6996 // Note: We currently assume zero cost for an unconditional branch inside 6997 // a predicated block since it will become a fall-through, although we 6998 // may decide in the future to call TTI for all branches. 6999 } 7000 case Instruction::PHI: { 7001 auto *Phi = cast<PHINode>(I); 7002 7003 // First-order recurrences are replaced by vector shuffles inside the loop. 7004 // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type. 7005 if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi)) 7006 return TTI.getShuffleCost( 7007 TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy), 7008 None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1)); 7009 7010 // Phi nodes in non-header blocks (not inductions, reductions, etc.) are 7011 // converted into select instructions. We require N - 1 selects per phi 7012 // node, where N is the number of incoming values. 7013 if (VF.isVector() && Phi->getParent() != TheLoop->getHeader()) 7014 return (Phi->getNumIncomingValues() - 1) * 7015 TTI.getCmpSelInstrCost( 7016 Instruction::Select, ToVectorTy(Phi->getType(), VF), 7017 ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF), 7018 CmpInst::BAD_ICMP_PREDICATE, CostKind); 7019 7020 return TTI.getCFInstrCost(Instruction::PHI, CostKind); 7021 } 7022 case Instruction::UDiv: 7023 case Instruction::SDiv: 7024 case Instruction::URem: 7025 case Instruction::SRem: 7026 // If we have a predicated instruction, it may not be executed for each 7027 // vector lane. Get the scalarization cost and scale this amount by the 7028 // probability of executing the predicated block. If the instruction is not 7029 // predicated, we fall through to the next case. 
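    // For illustration: with VF = 4, the estimate below is the cost of four
    // scalar divisions plus four PHIs plus the insert/extract overhead of
    // scalarization, divided by the reciprocal block probability to reflect
    // that the predicated block does not execute on every iteration.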
7030     if (VF.isVector() && isScalarWithPredication(I, VF)) {
7031       InstructionCost Cost = 0;
7032
7033       // These instructions have a non-void type, so account for the phi nodes
7034       // that we will create. This cost is likely to be zero. The phi node
7035       // cost, if any, should be scaled by the block probability because it
7036       // models a copy at the end of each predicated block.
7037       Cost += VF.getKnownMinValue() *
7038               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7039
7040       // The cost of the non-predicated instruction.
7041       Cost += VF.getKnownMinValue() *
7042               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7043
7044       // The cost of insertelement and extractelement instructions needed for
7045       // scalarization.
7046       Cost += getScalarizationOverhead(I, VF);
7047
7048       // Scale the cost by the probability of executing the predicated blocks.
7049       // This assumes the predicated block for each vector lane is equally
7050       // likely.
7051       return Cost / getReciprocalPredBlockProb();
7052     }
7053     LLVM_FALLTHROUGH;
7054   case Instruction::Add:
7055   case Instruction::FAdd:
7056   case Instruction::Sub:
7057   case Instruction::FSub:
7058   case Instruction::Mul:
7059   case Instruction::FMul:
7060   case Instruction::FDiv:
7061   case Instruction::FRem:
7062   case Instruction::Shl:
7063   case Instruction::LShr:
7064   case Instruction::AShr:
7065   case Instruction::And:
7066   case Instruction::Or:
7067   case Instruction::Xor: {
7068     // Since we will replace the stride by 1, the multiplication should go away.
7069     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7070       return 0;
7071
7072     // Detect reduction patterns.
7073     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7074       return *RedCost;
7075
7076     // Certain instructions can be cheaper to vectorize if they have a constant
7077     // second vector operand. One example of this is shifts on x86.
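    // E.g. a shift whose second operand is loop-invariant can be costed as a
    // shift by a uniform (splatted) value, which is cheaper on some targets
    // than a fully variable vector shift; the operand-value kind is refined
    // below before querying TTI.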
7078 Value *Op2 = I->getOperand(1); 7079 TargetTransformInfo::OperandValueProperties Op2VP; 7080 TargetTransformInfo::OperandValueKind Op2VK = 7081 TTI.getOperandInfo(Op2, Op2VP); 7082 if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2)) 7083 Op2VK = TargetTransformInfo::OK_UniformValue; 7084 7085 SmallVector<const Value *, 4> Operands(I->operand_values()); 7086 return TTI.getArithmeticInstrCost( 7087 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7088 Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I); 7089 } 7090 case Instruction::FNeg: { 7091 return TTI.getArithmeticInstrCost( 7092 I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue, 7093 TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None, 7094 TargetTransformInfo::OP_None, I->getOperand(0), I); 7095 } 7096 case Instruction::Select: { 7097 SelectInst *SI = cast<SelectInst>(I); 7098 const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); 7099 bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); 7100 7101 const Value *Op0, *Op1; 7102 using namespace llvm::PatternMatch; 7103 if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) || 7104 match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) { 7105 // select x, y, false --> x & y 7106 // select x, true, y --> x | y 7107 TTI::OperandValueProperties Op1VP = TTI::OP_None; 7108 TTI::OperandValueProperties Op2VP = TTI::OP_None; 7109 TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP); 7110 TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP); 7111 assert(Op0->getType()->getScalarSizeInBits() == 1 && 7112 Op1->getType()->getScalarSizeInBits() == 1); 7113 7114 SmallVector<const Value *, 2> Operands{Op0, Op1}; 7115 return TTI.getArithmeticInstrCost( 7116 match(I, m_LogicalOr()) ? 
Instruction::Or : Instruction::And, VectorTy, 7117 CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I); 7118 } 7119 7120 Type *CondTy = SI->getCondition()->getType(); 7121 if (!ScalarCond) 7122 CondTy = VectorType::get(CondTy, VF); 7123 7124 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; 7125 if (auto *Cmp = dyn_cast<CmpInst>(SI->getCondition())) 7126 Pred = Cmp->getPredicate(); 7127 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy, Pred, 7128 CostKind, I); 7129 } 7130 case Instruction::ICmp: 7131 case Instruction::FCmp: { 7132 Type *ValTy = I->getOperand(0)->getType(); 7133 Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0)); 7134 if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF)) 7135 ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]); 7136 VectorTy = ToVectorTy(ValTy, VF); 7137 return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr, 7138 cast<CmpInst>(I)->getPredicate(), CostKind, 7139 I); 7140 } 7141 case Instruction::Store: 7142 case Instruction::Load: { 7143 ElementCount Width = VF; 7144 if (Width.isVector()) { 7145 InstWidening Decision = getWideningDecision(I, Width); 7146 assert(Decision != CM_Unknown && 7147 "CM decision should be taken at this point"); 7148 if (getWideningCost(I, VF) == InstructionCost::getInvalid()) 7149 return InstructionCost::getInvalid(); 7150 if (Decision == CM_Scalarize) 7151 Width = ElementCount::getFixed(1); 7152 } 7153 VectorTy = ToVectorTy(getLoadStoreType(I), Width); 7154 return getMemoryInstructionCost(I, VF); 7155 } 7156 case Instruction::BitCast: 7157 if (I->getType()->isPointerTy()) 7158 return 0; 7159 LLVM_FALLTHROUGH; 7160 case Instruction::ZExt: 7161 case Instruction::SExt: 7162 case Instruction::FPToUI: 7163 case Instruction::FPToSI: 7164 case Instruction::FPExt: 7165 case Instruction::PtrToInt: 7166 case Instruction::IntToPtr: 7167 case Instruction::SIToFP: 7168 case Instruction::UIToFP: 7169 case Instruction::Trunc: 7170 case Instruction::FPTrunc: { 7171 // Computes the CastContextHint from a Load/Store instruction. 7172 auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint { 7173 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) && 7174 "Expected a load or a store!"); 7175 7176 if (VF.isScalar() || !TheLoop->contains(I)) 7177 return TTI::CastContextHint::Normal; 7178 7179 switch (getWideningDecision(I, VF)) { 7180 case LoopVectorizationCostModel::CM_GatherScatter: 7181 return TTI::CastContextHint::GatherScatter; 7182 case LoopVectorizationCostModel::CM_Interleave: 7183 return TTI::CastContextHint::Interleave; 7184 case LoopVectorizationCostModel::CM_Scalarize: 7185 case LoopVectorizationCostModel::CM_Widen: 7186 return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked 7187 : TTI::CastContextHint::Normal; 7188 case LoopVectorizationCostModel::CM_Widen_Reverse: 7189 return TTI::CastContextHint::Reversed; 7190 case LoopVectorizationCostModel::CM_Unknown: 7191 llvm_unreachable("Instr did not go through cost modelling?"); 7192 } 7193 7194 llvm_unreachable("Unhandled case!"); 7195 }; 7196 7197 unsigned Opcode = I->getOpcode(); 7198 TTI::CastContextHint CCH = TTI::CastContextHint::None; 7199 // For Trunc, the context is the only user, which must be a StoreInst. 7200 if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) { 7201 if (I->hasOneUse()) 7202 if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin())) 7203 CCH = ComputeCCH(Store); 7204 } 7205 // For Z/Sext, the context is the operand, which must be a LoadInst. 
7206 else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt || 7207 Opcode == Instruction::FPExt) { 7208 if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0))) 7209 CCH = ComputeCCH(Load); 7210 } 7211 7212 // We optimize the truncation of induction variables having constant 7213 // integer steps. The cost of these truncations is the same as the scalar 7214 // operation. 7215 if (isOptimizableIVTruncate(I, VF)) { 7216 auto *Trunc = cast<TruncInst>(I); 7217 return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(), 7218 Trunc->getSrcTy(), CCH, CostKind, Trunc); 7219 } 7220 7221 // Detect reduction patterns 7222 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7223 return *RedCost; 7224 7225 Type *SrcScalarTy = I->getOperand(0)->getType(); 7226 Type *SrcVecTy = 7227 VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy; 7228 if (canTruncateToMinimalBitwidth(I, VF)) { 7229 // This cast is going to be shrunk. This may remove the cast or it might 7230 // turn it into slightly different cast. For example, if MinBW == 16, 7231 // "zext i8 %1 to i32" becomes "zext i8 %1 to i16". 7232 // 7233 // Calculate the modified src and dest types. 7234 Type *MinVecTy = VectorTy; 7235 if (Opcode == Instruction::Trunc) { 7236 SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy); 7237 VectorTy = 7238 largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7239 } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { 7240 SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy); 7241 VectorTy = 7242 smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy); 7243 } 7244 } 7245 7246 return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I); 7247 } 7248 case Instruction::Call: { 7249 if (RecurrenceDescriptor::isFMulAddIntrinsic(I)) 7250 if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind)) 7251 return *RedCost; 7252 bool NeedToScalarize; 7253 CallInst *CI = cast<CallInst>(I); 7254 InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize); 7255 if (getVectorIntrinsicIDForCall(CI, TLI)) { 7256 InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF); 7257 return std::min(CallCost, IntrinsicCost); 7258 } 7259 return CallCost; 7260 } 7261 case Instruction::ExtractValue: 7262 return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput); 7263 case Instruction::Alloca: 7264 // We cannot easily widen alloca to a scalable alloca, as 7265 // the result would need to be a vector of pointers. 7266 if (VF.isScalable()) 7267 return InstructionCost::getInvalid(); 7268 LLVM_FALLTHROUGH; 7269 default: 7270 // This opcode is unknown. Assume that it is the same as 'mul'. 7271 return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind); 7272 } // end of switch. 
7273 }
7274
7275 char LoopVectorize::ID = 0;
7276
7277 static const char lv_name[] = "Loop Vectorization";
7278
7279 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7280 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7281 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7282 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7283 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7284 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7285 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7286 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7287 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7288 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7289 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7290 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7291 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7292 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7293 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7294 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7295
7296 namespace llvm {
7297
7298 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7299
7300 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7301                               bool VectorizeOnlyWhenForced) {
7302   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7303 }
7304
7305 } // end namespace llvm
7306
7307 void LoopVectorizationCostModel::collectValuesToIgnore() {
7308   // Ignore ephemeral values.
7309   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7310
7311   // Find all stores to invariant variables. Since they are going to sink
7312   // outside the loop, we do not need to calculate their cost.
7313   for (BasicBlock *BB : TheLoop->blocks())
7314     for (Instruction &I : *BB) {
7315       StoreInst *SI;
7316       if ((SI = dyn_cast<StoreInst>(&I)) &&
7317           Legal->isInvariantAddressOfReduction(SI->getPointerOperand()))
7318         ValuesToIgnore.insert(&I);
7319     }
7320
7321   // Ignore type-promoting instructions we identified during reduction
7322   // detection.
7323   for (auto &Reduction : Legal->getReductionVars()) {
7324     const RecurrenceDescriptor &RedDes = Reduction.second;
7325     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7326     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7327   }
7328   // Ignore type-casting instructions we identified during induction
7329   // detection.
7330   for (auto &Induction : Legal->getInductionVars()) {
7331     const InductionDescriptor &IndDes = Induction.second;
7332     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7333     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7334   }
7335 }
7336
7337 void LoopVectorizationCostModel::collectInLoopReductions() {
7338   for (auto &Reduction : Legal->getReductionVars()) {
7339     PHINode *Phi = Reduction.first;
7340     const RecurrenceDescriptor &RdxDesc = Reduction.second;
7341
7342     // We don't collect reductions that are type promoted (yet).
7343     if (RdxDesc.getRecurrenceType() != Phi->getType())
7344       continue;
7345
7346     // If the target would prefer this reduction to happen "in-loop", then we
7347     // want to record it as such.
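    // An "in-loop" reduction keeps a scalar accumulator and reduces each
    // vector of partial values inside the loop body, instead of carrying a
    // vector accumulator that is only reduced to a scalar after the loop.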
7348 unsigned Opcode = RdxDesc.getOpcode(); 7349 if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) && 7350 !TTI.preferInLoopReduction(Opcode, Phi->getType(), 7351 TargetTransformInfo::ReductionFlags())) 7352 continue; 7353 7354 // Check that we can correctly put the reductions into the loop, by 7355 // finding the chain of operations that leads from the phi to the loop 7356 // exit value. 7357 SmallVector<Instruction *, 4> ReductionOperations = 7358 RdxDesc.getReductionOpChain(Phi, TheLoop); 7359 bool InLoop = !ReductionOperations.empty(); 7360 if (InLoop) { 7361 InLoopReductionChains[Phi] = ReductionOperations; 7362 // Add the elements to InLoopReductionImmediateChains for cost modelling. 7363 Instruction *LastChain = Phi; 7364 for (auto *I : ReductionOperations) { 7365 InLoopReductionImmediateChains[I] = LastChain; 7366 LastChain = I; 7367 } 7368 } 7369 LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop") 7370 << " reduction for phi: " << *Phi << "\n"); 7371 } 7372 } 7373 7374 // TODO: we could return a pair of values that specify the max VF and 7375 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of 7376 // `buildVPlans(VF, VF)`. We cannot do it because VPLAN at the moment 7377 // doesn't have a cost model that can choose which plan to execute if 7378 // more than one is generated. 7379 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits, 7380 LoopVectorizationCostModel &CM) { 7381 unsigned WidestType; 7382 std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes(); 7383 return WidestVectorRegBits / WidestType; 7384 } 7385 7386 VectorizationFactor 7387 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) { 7388 assert(!UserVF.isScalable() && "scalable vectors not yet supported"); 7389 ElementCount VF = UserVF; 7390 // Outer loop handling: They may require CFG and instruction level 7391 // transformations before even evaluating whether vectorization is profitable. 7392 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 7393 // the vectorization pipeline. 7394 if (!OrigLoop->isInnermost()) { 7395 // If the user doesn't provide a vectorization factor, determine a 7396 // reasonable one. 7397 if (UserVF.isZero()) { 7398 VF = ElementCount::getFixed(determineVPlanVF( 7399 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 7400 .getFixedSize(), 7401 CM)); 7402 LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n"); 7403 7404 // Make sure we have a VF > 1 for stress testing. 7405 if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) { 7406 LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: " 7407 << "overriding computed VF.\n"); 7408 VF = ElementCount::getFixed(4); 7409 } 7410 } 7411 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 7412 assert(isPowerOf2_32(VF.getKnownMinValue()) && 7413 "VF needs to be a power of two"); 7414 LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "") 7415 << "VF " << VF << " to build VPlans.\n"); 7416 buildVPlans(VF, VF); 7417 7418 // For VPlan build stress testing, we bail out after VPlan construction. 7419 if (VPlanBuildStressTest) 7420 return VectorizationFactor::Disabled(); 7421 7422 return {VF, 0 /*Cost*/, 0 /* ScalarCost */}; 7423 } 7424 7425 LLVM_DEBUG( 7426 dbgs() << "LV: Not vectorizing. 
Inner loops aren't supported in the "
7427              "VPlan-native path.\n");
7428   return VectorizationFactor::Disabled();
7429 }
7430
7431 Optional<VectorizationFactor>
7432 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7433   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7434   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7435   if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7436     return None;
7437
7438   // Invalidate interleave groups if all blocks of the loop will be predicated.
7439   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7440       !useMaskedInterleavedAccesses(*TTI)) {
7441     LLVM_DEBUG(
7442         dbgs()
7443         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7444            "which requires masked-interleaved support.\n");
7445     if (CM.InterleaveInfo.invalidateGroups())
7446       // Invalidating interleave groups also requires invalidating all decisions
7447       // based on them, which includes widening decisions and uniform and scalar
7448       // values.
7449       CM.invalidateCostModelingDecisions();
7450   }
7451
7452   ElementCount MaxUserVF =
7453       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7454   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7455   if (!UserVF.isZero() && UserVFIsLegal) {
7456     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7457            "VF needs to be a power of two");
7458     // Collect the instructions (and their associated costs) that will be more
7459     // profitable to scalarize.
7460     if (CM.selectUserVectorizationFactor(UserVF)) {
7461       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7462       CM.collectInLoopReductions();
7463       buildVPlansWithVPRecipes(UserVF, UserVF);
7464       LLVM_DEBUG(printPlans(dbgs()));
7465       return {{UserVF, 0, 0}};
7466     } else
7467       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7468                               "InvalidCost", ORE, OrigLoop);
7469   }
7470
7471   // Populate the set of Vectorization Factor Candidates.
7472   ElementCountSet VFCandidates;
7473   for (auto VF = ElementCount::getFixed(1);
7474        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7475     VFCandidates.insert(VF);
7476   for (auto VF = ElementCount::getScalable(1);
7477        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7478     VFCandidates.insert(VF);
7479
7480   for (const auto &VF : VFCandidates) {
7481     // Collect Uniform and Scalar instructions after vectorization with VF.
7482     CM.collectUniformsAndScalars(VF);
7483
7484     // Collect the instructions (and their associated costs) that will be more
7485     // profitable to scalarize.
7486     if (VF.isVector())
7487       CM.collectInstsToScalarize(VF);
7488   }
7489
7490   CM.collectInLoopReductions();
7491   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7492   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7493
7494   LLVM_DEBUG(printPlans(dbgs()));
7495   if (!MaxFactors.hasVector())
7496     return VectorizationFactor::Disabled();
7497
7498   // Select the optimal vectorization factor.
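  // Both the fixed-width and scalable candidates collected above compete
  // here; the result may still be a scalar VF if no vector factor is deemed
  // profitable.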
7499 VectorizationFactor VF = CM.selectVectorizationFactor(VFCandidates); 7500 assert((VF.Width.isScalar() || VF.ScalarCost > 0) && "when vectorizing, the scalar cost must be non-zero."); 7501 return VF; 7502 } 7503 7504 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const { 7505 assert(count_if(VPlans, 7506 [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) == 7507 1 && 7508 "Best VF has not a single VPlan."); 7509 7510 for (const VPlanPtr &Plan : VPlans) { 7511 if (Plan->hasVF(VF)) 7512 return *Plan.get(); 7513 } 7514 llvm_unreachable("No plan found!"); 7515 } 7516 7517 static void AddRuntimeUnrollDisableMetaData(Loop *L) { 7518 SmallVector<Metadata *, 4> MDs; 7519 // Reserve first location for self reference to the LoopID metadata node. 7520 MDs.push_back(nullptr); 7521 bool IsUnrollMetadata = false; 7522 MDNode *LoopID = L->getLoopID(); 7523 if (LoopID) { 7524 // First find existing loop unrolling disable metadata. 7525 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 7526 auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i)); 7527 if (MD) { 7528 const auto *S = dyn_cast<MDString>(MD->getOperand(0)); 7529 IsUnrollMetadata = 7530 S && S->getString().startswith("llvm.loop.unroll.disable"); 7531 } 7532 MDs.push_back(LoopID->getOperand(i)); 7533 } 7534 } 7535 7536 if (!IsUnrollMetadata) { 7537 // Add runtime unroll disable metadata. 7538 LLVMContext &Context = L->getHeader()->getContext(); 7539 SmallVector<Metadata *, 1> DisableOperands; 7540 DisableOperands.push_back( 7541 MDString::get(Context, "llvm.loop.unroll.runtime.disable")); 7542 MDNode *DisableNode = MDNode::get(Context, DisableOperands); 7543 MDs.push_back(DisableNode); 7544 MDNode *NewLoopID = MDNode::get(Context, MDs); 7545 // Set operand 0 to refer to the loop id itself. 7546 NewLoopID->replaceOperandWith(0, NewLoopID); 7547 L->setLoopID(NewLoopID); 7548 } 7549 } 7550 7551 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF, 7552 VPlan &BestVPlan, 7553 InnerLoopVectorizer &ILV, 7554 DominatorTree *DT, 7555 bool IsEpilogueVectorization) { 7556 LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF 7557 << '\n'); 7558 7559 // Perform the actual loop transformation. 7560 7561 // 1. Set up the skeleton for vectorization, including vector pre-header and 7562 // middle block. The vector loop is created during VPlan execution. 7563 VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan}; 7564 Value *CanonicalIVStartValue; 7565 std::tie(State.CFG.PrevBB, CanonicalIVStartValue) = 7566 ILV.createVectorizedLoopSkeleton(); 7567 7568 // Only use noalias metadata when using memory checks guaranteeing no overlap 7569 // across all iterations. 7570 const LoopAccessInfo *LAI = ILV.Legal->getLAI(); 7571 if (LAI && !LAI->getRuntimePointerChecking()->getChecks().empty() && 7572 !LAI->getRuntimePointerChecking()->getDiffChecks()) { 7573 7574 // We currently don't use LoopVersioning for the actual loop cloning but we 7575 // still use it to add the noalias metadata. 7576 // TODO: Find a better way to re-use LoopVersioning functionality to add 7577 // metadata. 
7578 State.LVer = std::make_unique<LoopVersioning>( 7579 *LAI, LAI->getRuntimePointerChecking()->getChecks(), OrigLoop, LI, DT, 7580 PSE.getSE()); 7581 State.LVer->prepareNoAliasMetadata(); 7582 } 7583 7584 ILV.collectPoisonGeneratingRecipes(State); 7585 7586 ILV.printDebugTracesAtStart(); 7587 7588 //===------------------------------------------------===// 7589 // 7590 // Notice: any optimization or new instruction that go 7591 // into the code below should also be implemented in 7592 // the cost-model. 7593 // 7594 //===------------------------------------------------===// 7595 7596 // 2. Copy and widen instructions from the old loop into the new loop. 7597 BestVPlan.prepareToExecute(ILV.getOrCreateTripCount(nullptr), 7598 ILV.getOrCreateVectorTripCount(nullptr), 7599 CanonicalIVStartValue, State, 7600 IsEpilogueVectorization); 7601 7602 BestVPlan.execute(&State); 7603 7604 // Keep all loop hints from the original loop on the vector loop (we'll 7605 // replace the vectorizer-specific hints below). 7606 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7607 7608 Optional<MDNode *> VectorizedLoopID = 7609 makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll, 7610 LLVMLoopVectorizeFollowupVectorized}); 7611 7612 VPBasicBlock *HeaderVPBB = 7613 BestVPlan.getVectorLoopRegion()->getEntryBasicBlock(); 7614 Loop *L = LI->getLoopFor(State.CFG.VPBB2IRBB[HeaderVPBB]); 7615 if (VectorizedLoopID) 7616 L->setLoopID(VectorizedLoopID.value()); 7617 else { 7618 // Keep all loop hints from the original loop on the vector loop (we'll 7619 // replace the vectorizer-specific hints below). 7620 if (MDNode *LID = OrigLoop->getLoopID()) 7621 L->setLoopID(LID); 7622 7623 LoopVectorizeHints Hints(L, true, *ORE); 7624 Hints.setAlreadyVectorized(); 7625 } 7626 // Disable runtime unrolling when vectorizing the epilogue loop. 7627 if (CanonicalIVStartValue) 7628 AddRuntimeUnrollDisableMetaData(L); 7629 7630 // 3. Fix the vectorized code: take care of header phi's, live-outs, 7631 // predication, updating analyses. 7632 ILV.fixVectorizedLoop(State, BestVPlan); 7633 7634 ILV.printDebugTracesAtEnd(); 7635 } 7636 7637 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 7638 void LoopVectorizationPlanner::printPlans(raw_ostream &O) { 7639 for (const auto &Plan : VPlans) 7640 if (PrintVPlansInDotFormat) 7641 Plan->printDOT(O); 7642 else 7643 Plan->print(O); 7644 } 7645 #endif 7646 7647 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; } 7648 7649 //===--------------------------------------------------------------------===// 7650 // EpilogueVectorizerMainLoop 7651 //===--------------------------------------------------------------------===// 7652 7653 /// This function is partially responsible for generating the control flow 7654 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7655 std::pair<BasicBlock *, Value *> 7656 EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() { 7657 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7658 7659 // Workaround! Compute the trip count of the original loop and cache it 7660 // before we start modifying the CFG. This code has a systemic problem 7661 // wherein it tries to run analysis over partially constructed IR; this is 7662 // wrong, and not simply for SCEV. The trip count of the original loop 7663 // simply happens to be prone to hitting this in practice. In theory, we 7664 // can hit the same issue for any SCEV, or ValueTracking query done during 7665 // mutation. See PR49900. 
7666 getOrCreateTripCount(OrigLoop->getLoopPreheader()); 7667 createVectorLoopSkeleton(""); 7668 7669 // Generate the code to check the minimum iteration count of the vector 7670 // epilogue (see below). 7671 EPI.EpilogueIterationCountCheck = 7672 emitIterationCountCheck(LoopScalarPreHeader, true); 7673 EPI.EpilogueIterationCountCheck->setName("iter.check"); 7674 7675 // Generate the code to check any assumptions that we've made for SCEV 7676 // expressions. 7677 EPI.SCEVSafetyCheck = emitSCEVChecks(LoopScalarPreHeader); 7678 7679 // Generate the code that checks at runtime if arrays overlap. We put the 7680 // checks into a separate block to make the more common case of few elements 7681 // faster. 7682 EPI.MemSafetyCheck = emitMemRuntimeChecks(LoopScalarPreHeader); 7683 7684 // Generate the iteration count check for the main loop, *after* the check 7685 // for the epilogue loop, so that the path-length is shorter for the case 7686 // that goes directly through the vector epilogue. The longer-path length for 7687 // the main loop is compensated for, by the gain from vectorizing the larger 7688 // trip count. Note: the branch will get updated later on when we vectorize 7689 // the epilogue. 7690 EPI.MainLoopIterationCountCheck = 7691 emitIterationCountCheck(LoopScalarPreHeader, false); 7692 7693 // Generate the induction variable. 7694 EPI.VectorTripCount = getOrCreateVectorTripCount(LoopVectorPreHeader); 7695 7696 // Skip induction resume value creation here because they will be created in 7697 // the second pass. If we created them here, they wouldn't be used anyway, 7698 // because the vplan in the second pass still contains the inductions from the 7699 // original loop. 7700 7701 return {completeLoopSkeleton(OrigLoopID), nullptr}; 7702 } 7703 7704 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() { 7705 LLVM_DEBUG({ 7706 dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n" 7707 << "Main Loop VF:" << EPI.MainLoopVF 7708 << ", Main Loop UF:" << EPI.MainLoopUF 7709 << ", Epilogue Loop VF:" << EPI.EpilogueVF 7710 << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n"; 7711 }); 7712 } 7713 7714 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() { 7715 DEBUG_WITH_TYPE(VerboseDebug, { 7716 dbgs() << "intermediate fn:\n" 7717 << *OrigLoop->getHeader()->getParent() << "\n"; 7718 }); 7719 } 7720 7721 BasicBlock * 7722 EpilogueVectorizerMainLoop::emitIterationCountCheck(BasicBlock *Bypass, 7723 bool ForEpilogue) { 7724 assert(Bypass && "Expected valid bypass basic block."); 7725 ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF; 7726 unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF; 7727 Value *Count = getOrCreateTripCount(LoopVectorPreHeader); 7728 // Reuse existing vector loop preheader for TC checks. 7729 // Note that new preheader block is generated for vector loop. 7730 BasicBlock *const TCCheckBlock = LoopVectorPreHeader; 7731 IRBuilder<> Builder(TCCheckBlock->getTerminator()); 7732 7733 // Generate code to check if the loop's trip count is less than VF * UF of the 7734 // main vector loop. 7735 auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ? 7736 ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT; 7737 7738 Value *CheckMinIters = Builder.CreateICmp( 7739 P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor), 7740 "min.iters.check"); 7741 7742 if (!ForEpilogue) 7743 TCCheckBlock->setName("vector.main.loop.iter.check"); 7744 7745 // Create new preheader for vector loop. 
7746 LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), 7747 DT, LI, nullptr, "vector.ph"); 7748 7749 if (ForEpilogue) { 7750 assert(DT->properlyDominates(DT->getNode(TCCheckBlock), 7751 DT->getNode(Bypass)->getIDom()) && 7752 "TC check is expected to dominate Bypass"); 7753 7754 // Update dominator for Bypass & LoopExit. 7755 DT->changeImmediateDominator(Bypass, TCCheckBlock); 7756 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7757 // For loops with multiple exits, there's no edge from the middle block 7758 // to exit blocks (as the epilogue must run) and thus no need to update 7759 // the immediate dominator of the exit blocks. 7760 DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock); 7761 7762 LoopBypassBlocks.push_back(TCCheckBlock); 7763 7764 // Save the trip count so we don't have to regenerate it in the 7765 // vec.epilog.iter.check. This is safe to do because the trip count 7766 // generated here dominates the vector epilog iter check. 7767 EPI.TripCount = Count; 7768 } 7769 7770 ReplaceInstWithInst( 7771 TCCheckBlock->getTerminator(), 7772 BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters)); 7773 7774 return TCCheckBlock; 7775 } 7776 7777 //===--------------------------------------------------------------------===// 7778 // EpilogueVectorizerEpilogueLoop 7779 //===--------------------------------------------------------------------===// 7780 7781 /// This function is partially responsible for generating the control flow 7782 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization. 7783 std::pair<BasicBlock *, Value *> 7784 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() { 7785 MDNode *OrigLoopID = OrigLoop->getLoopID(); 7786 createVectorLoopSkeleton("vec.epilog."); 7787 7788 // Now, compare the remaining count and if there aren't enough iterations to 7789 // execute the vectorized epilogue skip to the scalar part. 7790 BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader; 7791 VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check"); 7792 LoopVectorPreHeader = 7793 SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT, 7794 LI, nullptr, "vec.epilog.ph"); 7795 emitMinimumVectorEpilogueIterCountCheck(LoopScalarPreHeader, 7796 VecEpilogueIterationCountCheck); 7797 7798 // Adjust the control flow taking the state info from the main loop 7799 // vectorization into account. 
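  // Concretely: bypass edges that used to target the old vector preheader
  // (now renamed vec.epilog.iter.check) are rewired so that the main loop's
  // iteration-count check branches to the new epilogue preheader, while the
  // epilogue iteration-count check and the SCEV/memory safety checks branch
  // to the scalar preheader instead.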
7800 assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck && 7801 "expected this to be saved from the previous pass."); 7802 EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith( 7803 VecEpilogueIterationCountCheck, LoopVectorPreHeader); 7804 7805 DT->changeImmediateDominator(LoopVectorPreHeader, 7806 EPI.MainLoopIterationCountCheck); 7807 7808 EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith( 7809 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7810 7811 if (EPI.SCEVSafetyCheck) 7812 EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith( 7813 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7814 if (EPI.MemSafetyCheck) 7815 EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith( 7816 VecEpilogueIterationCountCheck, LoopScalarPreHeader); 7817 7818 DT->changeImmediateDominator( 7819 VecEpilogueIterationCountCheck, 7820 VecEpilogueIterationCountCheck->getSinglePredecessor()); 7821 7822 DT->changeImmediateDominator(LoopScalarPreHeader, 7823 EPI.EpilogueIterationCountCheck); 7824 if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF)) 7825 // If there is an epilogue which must run, there's no edge from the 7826 // middle block to exit blocks and thus no need to update the immediate 7827 // dominator of the exit blocks. 7828 DT->changeImmediateDominator(LoopExitBlock, 7829 EPI.EpilogueIterationCountCheck); 7830 7831 // Keep track of bypass blocks, as they feed start values to the induction 7832 // phis in the scalar loop preheader. 7833 if (EPI.SCEVSafetyCheck) 7834 LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck); 7835 if (EPI.MemSafetyCheck) 7836 LoopBypassBlocks.push_back(EPI.MemSafetyCheck); 7837 LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck); 7838 7839 // The vec.epilog.iter.check block may contain Phi nodes from reductions which 7840 // merge control-flow from the latch block and the middle block. Update the 7841 // incoming values here and move the Phi into the preheader. 7842 SmallVector<PHINode *, 4> PhisInBlock; 7843 for (PHINode &Phi : VecEpilogueIterationCountCheck->phis()) 7844 PhisInBlock.push_back(&Phi); 7845 7846 for (PHINode *Phi : PhisInBlock) { 7847 Phi->replaceIncomingBlockWith( 7848 VecEpilogueIterationCountCheck->getSinglePredecessor(), 7849 VecEpilogueIterationCountCheck); 7850 Phi->removeIncomingValue(EPI.EpilogueIterationCountCheck); 7851 if (EPI.SCEVSafetyCheck) 7852 Phi->removeIncomingValue(EPI.SCEVSafetyCheck); 7853 if (EPI.MemSafetyCheck) 7854 Phi->removeIncomingValue(EPI.MemSafetyCheck); 7855 Phi->moveBefore(LoopVectorPreHeader->getFirstNonPHI()); 7856 } 7857 7858 // Generate a resume induction for the vector epilogue and put it in the 7859 // vector epilogue preheader 7860 Type *IdxTy = Legal->getWidestInductionType(); 7861 PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val", 7862 LoopVectorPreHeader->getFirstNonPHI()); 7863 EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck); 7864 EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0), 7865 EPI.MainLoopIterationCountCheck); 7866 7867 // Generate induction resume values. These variables save the new starting 7868 // indexes for the scalar loop. They are used to test if there are any tail 7869 // iterations left once the vector loop has completed. 
7870   // Note that when the vectorized epilogue is skipped due to the iteration
7871   // count check, the resume value for the induction variable comes from the
7872   // trip count of the main vector loop, hence passing the AdditionalBypass
7873   // argument.
7874   createInductionResumeValues({VecEpilogueIterationCountCheck,
7875                                EPI.VectorTripCount} /* AdditionalBypass */);
7876
7877   return {completeLoopSkeleton(OrigLoopID), EPResumeVal};
7878 }
7879
7880 BasicBlock *
7881 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
7882     BasicBlock *Bypass, BasicBlock *Insert) {
7883
7884   assert(EPI.TripCount &&
7885          "Expected trip count to have been saved in the first pass.");
7886   assert(
7887       (!isa<Instruction>(EPI.TripCount) ||
7888        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
7889       "saved trip count does not dominate insertion point.");
7890   Value *TC = EPI.TripCount;
7891   IRBuilder<> Builder(Insert->getTerminator());
7892   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
7893
7894   // Generate code to check if the loop's trip count is less than VF * UF of the
7895   // vector epilogue loop.
7896   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
7897       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
7898
7899   Value *CheckMinIters =
7900       Builder.CreateICmp(P, Count,
7901                          createStepForVF(Builder, Count->getType(),
7902                                          EPI.EpilogueVF, EPI.EpilogueUF),
7903                          "min.epilog.iters.check");
7904
7905   ReplaceInstWithInst(
7906       Insert->getTerminator(),
7907       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
7908
7909   LoopBypassBlocks.push_back(Insert);
7910   return Insert;
7911 }
7912
7913 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
7914   LLVM_DEBUG({
7915     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
7916            << "Epilogue Loop VF:" << EPI.EpilogueVF
7917            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
7918   });
7919 }
7920
7921 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
7922   DEBUG_WITH_TYPE(VerboseDebug, {
7923     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
7924   });
7925 }
7926
7927 bool LoopVectorizationPlanner::getDecisionAndClampRange(
7928     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
7929   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
7930   bool PredicateAtRangeStart = Predicate(Range.Start);
7931
7932   for (ElementCount TmpVF = Range.Start * 2;
7933        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
7934     if (Predicate(TmpVF) != PredicateAtRangeStart) {
7935       Range.End = TmpVF;
7936       break;
7937     }
7938
7939   return PredicateAtRangeStart;
7940 }
7941
7942 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
7943 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
7944 /// of VF's starting at a given VF and extending it as much as possible. Each
7945 /// vectorization decision can potentially shorten this sub-range during
7946 /// buildVPlan().
7947 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF, 7948 ElementCount MaxVF) { 7949 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 7950 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 7951 VFRange SubRange = {VF, MaxVFPlusOne}; 7952 VPlans.push_back(buildVPlan(SubRange)); 7953 VF = SubRange.End; 7954 } 7955 } 7956 7957 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst, 7958 VPlanPtr &Plan) { 7959 assert(is_contained(predecessors(Dst), Src) && "Invalid edge"); 7960 7961 // Look for cached value. 7962 std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst); 7963 EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge); 7964 if (ECEntryIt != EdgeMaskCache.end()) 7965 return ECEntryIt->second; 7966 7967 VPValue *SrcMask = createBlockInMask(Src, Plan); 7968 7969 // The terminator has to be a branch inst! 7970 BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator()); 7971 assert(BI && "Unexpected terminator found"); 7972 7973 if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1)) 7974 return EdgeMaskCache[Edge] = SrcMask; 7975 7976 // If source is an exiting block, we know the exit edge is dynamically dead 7977 // in the vector loop, and thus we don't need to restrict the mask. Avoid 7978 // adding uses of an otherwise potentially dead instruction. 7979 if (OrigLoop->isLoopExiting(Src)) 7980 return EdgeMaskCache[Edge] = SrcMask; 7981 7982 VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition()); 7983 assert(EdgeMask && "No Edge Mask found for condition"); 7984 7985 if (BI->getSuccessor(0) != Dst) 7986 EdgeMask = Builder.createNot(EdgeMask, BI->getDebugLoc()); 7987 7988 if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND. 7989 // The condition is 'SrcMask && EdgeMask', which is equivalent to 7990 // 'select i1 SrcMask, i1 EdgeMask, i1 false'. 7991 // The select version does not introduce new UB if SrcMask is false and 7992 // EdgeMask is poison. Using 'and' here introduces undefined behavior. 7993 VPValue *False = Plan->getOrAddVPValue( 7994 ConstantInt::getFalse(BI->getCondition()->getType())); 7995 EdgeMask = 7996 Builder.createSelect(SrcMask, EdgeMask, False, BI->getDebugLoc()); 7997 } 7998 7999 return EdgeMaskCache[Edge] = EdgeMask; 8000 } 8001 8002 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) { 8003 assert(OrigLoop->contains(BB) && "Block is not a part of a loop"); 8004 8005 // Look for cached value. 8006 BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB); 8007 if (BCEntryIt != BlockMaskCache.end()) 8008 return BCEntryIt->second; 8009 8010 // All-one mask is modelled as no-mask following the convention for masked 8011 // load/store/gather/scatter. Initialize BlockMask to no-mask. 8012 VPValue *BlockMask = nullptr; 8013 8014 if (OrigLoop->getHeader() == BB) { 8015 if (!CM.blockNeedsPredicationForAnyReason(BB)) 8016 return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one. 8017 8018 assert(CM.foldTailByMasking() && "must fold the tail"); 8019 8020 // If we're using the active lane mask for control flow, then we get the 8021 // mask from the active lane mask PHI that is cached in the VPlan. 8022 PredicationStyle EmitGetActiveLaneMask = CM.TTI.emitGetActiveLaneMask(); 8023 if (EmitGetActiveLaneMask == PredicationStyle::DataAndControlFlow) 8024 return BlockMaskCache[BB] = Plan->getActiveLaneMaskPhi(); 8025 8026 // Introduce the early-exit compare IV <= BTC to form header block mask. 
8027   // This is used instead of IV < TC because TC may wrap, unlike BTC. Start by
8028   // constructing the desired canonical IV in the header block as its first
8029   // non-phi instructions.
8030
8031     VPBasicBlock *HeaderVPBB =
8032         Plan->getVectorLoopRegion()->getEntryBasicBlock();
8033     auto NewInsertionPoint = HeaderVPBB->getFirstNonPhi();
8034     auto *IV = new VPWidenCanonicalIVRecipe(Plan->getCanonicalIV());
8035     HeaderVPBB->insert(IV, HeaderVPBB->getFirstNonPhi());
8036
8037     VPBuilder::InsertPointGuard Guard(Builder);
8038     Builder.setInsertPoint(HeaderVPBB, NewInsertionPoint);
8039     if (EmitGetActiveLaneMask != PredicationStyle::None) {
8040       VPValue *TC = Plan->getOrCreateTripCount();
8041       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV, TC},
8042                                        nullptr, "active.lane.mask");
8043     } else {
8044       VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8045       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8046     }
8047     return BlockMaskCache[BB] = BlockMask;
8048   }
8049
8050   // This is the block mask. We OR all incoming edges.
8051   for (auto *Predecessor : predecessors(BB)) {
8052     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8053     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8054       return BlockMaskCache[BB] = EdgeMask;
8055
8056     if (!BlockMask) { // BlockMask still has its initial nullptr value.
8057       BlockMask = EdgeMask;
8058       continue;
8059     }
8060
8061     BlockMask = Builder.createOr(BlockMask, EdgeMask, {});
8062   }
8063
8064   return BlockMaskCache[BB] = BlockMask;
8065 }
8066
8067 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8068                                                 ArrayRef<VPValue *> Operands,
8069                                                 VFRange &Range,
8070                                                 VPlanPtr &Plan) {
8071   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8072          "Must be called with either a load or store");
8073
8074   auto willWiden = [&](ElementCount VF) -> bool {
8075     LoopVectorizationCostModel::InstWidening Decision =
8076         CM.getWideningDecision(I, VF);
8077     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8078            "CM decision should be taken at this point.");
8079     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8080       return true;
8081     if (CM.isScalarAfterVectorization(I, VF) ||
8082         CM.isProfitableToScalarize(I, VF))
8083       return false;
8084     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8085   };
8086
8087   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8088     return nullptr;
8089
8090   VPValue *Mask = nullptr;
8091   if (Legal->isMaskRequired(I))
8092     Mask = createBlockInMask(I->getParent(), Plan);
8093
8094   // Determine if the pointer operand of the access is either consecutive or
8095   // reverse consecutive.
8096   LoopVectorizationCostModel::InstWidening Decision =
8097       CM.getWideningDecision(I, Range.Start);
8098   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8099   bool Consecutive =
8100       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8101
8102   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8103     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8104                                               Consecutive, Reverse);
8105
8106   StoreInst *Store = cast<StoreInst>(I);
8107   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8108                                             Mask, Consecutive, Reverse);
8109 }
8110
8111 /// Creates a VPWidenIntOrFpInductionRecipe for \p Phi. If needed, it will also
8112 /// insert a recipe to expand the step for the induction recipe.
8113 static VPWidenIntOrFpInductionRecipe *createWidenInductionRecipes( 8114 PHINode *Phi, Instruction *PhiOrTrunc, VPValue *Start, 8115 const InductionDescriptor &IndDesc, LoopVectorizationCostModel &CM, 8116 VPlan &Plan, ScalarEvolution &SE, Loop &OrigLoop, VFRange &Range) { 8117 // Returns true if an instruction \p I should be scalarized instead of 8118 // vectorized for the chosen vectorization factor. 8119 auto ShouldScalarizeInstruction = [&CM](Instruction *I, ElementCount VF) { 8120 return CM.isScalarAfterVectorization(I, VF) || 8121 CM.isProfitableToScalarize(I, VF); 8122 }; 8123 8124 bool NeedsScalarIVOnly = LoopVectorizationPlanner::getDecisionAndClampRange( 8125 [&](ElementCount VF) { 8126 return ShouldScalarizeInstruction(PhiOrTrunc, VF); 8127 }, 8128 Range); 8129 assert(IndDesc.getStartValue() == 8130 Phi->getIncomingValueForBlock(OrigLoop.getLoopPreheader())); 8131 assert(SE.isLoopInvariant(IndDesc.getStep(), &OrigLoop) && 8132 "step must be loop invariant"); 8133 8134 VPValue *Step = 8135 vputils::getOrCreateVPValueForSCEVExpr(Plan, IndDesc.getStep(), SE); 8136 if (auto *TruncI = dyn_cast<TruncInst>(PhiOrTrunc)) { 8137 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, TruncI, 8138 !NeedsScalarIVOnly); 8139 } 8140 assert(isa<PHINode>(PhiOrTrunc) && "must be a phi node here"); 8141 return new VPWidenIntOrFpInductionRecipe(Phi, Start, Step, IndDesc, 8142 !NeedsScalarIVOnly); 8143 } 8144 8145 VPRecipeBase *VPRecipeBuilder::tryToOptimizeInductionPHI( 8146 PHINode *Phi, ArrayRef<VPValue *> Operands, VPlan &Plan, VFRange &Range) { 8147 8148 // Check if this is an integer or fp induction. If so, build the recipe that 8149 // produces its scalar and vector values. 8150 if (auto *II = Legal->getIntOrFpInductionDescriptor(Phi)) 8151 return createWidenInductionRecipes(Phi, Phi, Operands[0], *II, CM, Plan, 8152 *PSE.getSE(), *OrigLoop, Range); 8153 8154 // Check if this is pointer induction. If so, build the recipe for it. 8155 if (auto *II = Legal->getPointerInductionDescriptor(Phi)) 8156 return new VPWidenPointerInductionRecipe(Phi, Operands[0], *II, 8157 *PSE.getSE()); 8158 return nullptr; 8159 } 8160 8161 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate( 8162 TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range, VPlan &Plan) { 8163 // Optimize the special case where the source is a constant integer 8164 // induction variable. Notice that we can only optimize the 'trunc' case 8165 // because (a) FP conversions lose precision, (b) sext/zext may wrap, and 8166 // (c) other casts depend on pointer size. 8167 8168 // Determine whether \p K is a truncation based on an induction variable that 8169 // can be optimized. 
8170 auto isOptimizableIVTruncate = 8171 [&](Instruction *K) -> std::function<bool(ElementCount)> { 8172 return [=](ElementCount VF) -> bool { 8173 return CM.isOptimizableIVTruncate(K, VF); 8174 }; 8175 }; 8176 8177 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8178 isOptimizableIVTruncate(I), Range)) { 8179 8180 auto *Phi = cast<PHINode>(I->getOperand(0)); 8181 const InductionDescriptor &II = *Legal->getIntOrFpInductionDescriptor(Phi); 8182 VPValue *Start = Plan.getOrAddVPValue(II.getStartValue()); 8183 return createWidenInductionRecipes(Phi, I, Start, II, CM, Plan, 8184 *PSE.getSE(), *OrigLoop, Range); 8185 } 8186 return nullptr; 8187 } 8188 8189 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi, 8190 ArrayRef<VPValue *> Operands, 8191 VPlanPtr &Plan) { 8192 // If all incoming values are equal, the incoming VPValue can be used directly 8193 // instead of creating a new VPBlendRecipe. 8194 VPValue *FirstIncoming = Operands[0]; 8195 if (all_of(Operands, [FirstIncoming](const VPValue *Inc) { 8196 return FirstIncoming == Inc; 8197 })) { 8198 return Operands[0]; 8199 } 8200 8201 unsigned NumIncoming = Phi->getNumIncomingValues(); 8202 // For in-loop reductions, we do not need to create an additional select. 8203 VPValue *InLoopVal = nullptr; 8204 for (unsigned In = 0; In < NumIncoming; In++) { 8205 PHINode *PhiOp = 8206 dyn_cast_or_null<PHINode>(Operands[In]->getUnderlyingValue()); 8207 if (PhiOp && CM.isInLoopReduction(PhiOp)) { 8208 assert(!InLoopVal && "Found more than one in-loop reduction!"); 8209 InLoopVal = Operands[In]; 8210 } 8211 } 8212 8213 assert((!InLoopVal || NumIncoming == 2) && 8214 "Found an in-loop reduction for PHI with unexpected number of " 8215 "incoming values"); 8216 if (InLoopVal) 8217 return Operands[Operands[0] == InLoopVal ? 1 : 0]; 8218 8219 // We know that all PHIs in non-header blocks are converted into selects, so 8220 // we don't have to worry about the insertion order and we can just use the 8221 // builder. At this point we generate the predication tree. There may be 8222 // duplications since this is a simple recursive scan, but future 8223 // optimizations will clean it up. 
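  // Conceptually, a phi with incoming values %a (via edge E1) and %b (via
  // edge E2) becomes a blend of the pairs (%a, mask(E1)) and (%b, mask(E2)),
  // which is later lowered to selects.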
8224 SmallVector<VPValue *, 2> OperandsWithMask; 8225 8226 for (unsigned In = 0; In < NumIncoming; In++) { 8227 VPValue *EdgeMask = 8228 createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan); 8229 assert((EdgeMask || NumIncoming == 1) && 8230 "Multiple predecessors with one having a full mask"); 8231 OperandsWithMask.push_back(Operands[In]); 8232 if (EdgeMask) 8233 OperandsWithMask.push_back(EdgeMask); 8234 } 8235 return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask)); 8236 } 8237 8238 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI, 8239 ArrayRef<VPValue *> Operands, 8240 VFRange &Range) const { 8241 8242 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8243 [this, CI](ElementCount VF) { 8244 return CM.isScalarWithPredication(CI, VF); 8245 }, 8246 Range); 8247 8248 if (IsPredicated) 8249 return nullptr; 8250 8251 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8252 if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end || 8253 ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect || 8254 ID == Intrinsic::pseudoprobe || 8255 ID == Intrinsic::experimental_noalias_scope_decl)) 8256 return nullptr; 8257 8258 auto willWiden = [&](ElementCount VF) -> bool { 8259 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI); 8260 // The following case may be scalarized depending on the VF. 8261 // The flag shows whether we use Intrinsic or a usual Call for vectorized 8262 // version of the instruction. 8263 // Is it beneficial to perform intrinsic call compared to lib call? 8264 bool NeedToScalarize = false; 8265 InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize); 8266 InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0; 8267 bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost; 8268 return UseVectorIntrinsic || !NeedToScalarize; 8269 }; 8270 8271 if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range)) 8272 return nullptr; 8273 8274 ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size()); 8275 return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end())); 8276 } 8277 8278 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const { 8279 assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) && 8280 !isa<StoreInst>(I) && "Instruction should have been handled earlier"); 8281 // Instruction should be widened, unless it is scalar after vectorization, 8282 // scalarization is profitable or it is predicated. 
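// Note: getDecisionAndClampRange evaluates the predicate at Range.Start and
// clamps Range.End so that all VFs left in the range share that decision, so
// the negated result below holds for the entire clamped range.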
8283 auto WillScalarize = [this, I](ElementCount VF) -> bool { 8284 return CM.isScalarAfterVectorization(I, VF) || 8285 CM.isProfitableToScalarize(I, VF) || 8286 CM.isScalarWithPredication(I, VF); 8287 }; 8288 return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize, 8289 Range); 8290 } 8291 8292 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I, 8293 ArrayRef<VPValue *> Operands) const { 8294 auto IsVectorizableOpcode = [](unsigned Opcode) { 8295 switch (Opcode) { 8296 case Instruction::Add: 8297 case Instruction::And: 8298 case Instruction::AShr: 8299 case Instruction::BitCast: 8300 case Instruction::FAdd: 8301 case Instruction::FCmp: 8302 case Instruction::FDiv: 8303 case Instruction::FMul: 8304 case Instruction::FNeg: 8305 case Instruction::FPExt: 8306 case Instruction::FPToSI: 8307 case Instruction::FPToUI: 8308 case Instruction::FPTrunc: 8309 case Instruction::FRem: 8310 case Instruction::FSub: 8311 case Instruction::ICmp: 8312 case Instruction::IntToPtr: 8313 case Instruction::LShr: 8314 case Instruction::Mul: 8315 case Instruction::Or: 8316 case Instruction::PtrToInt: 8317 case Instruction::SDiv: 8318 case Instruction::Select: 8319 case Instruction::SExt: 8320 case Instruction::Shl: 8321 case Instruction::SIToFP: 8322 case Instruction::SRem: 8323 case Instruction::Sub: 8324 case Instruction::Trunc: 8325 case Instruction::UDiv: 8326 case Instruction::UIToFP: 8327 case Instruction::URem: 8328 case Instruction::Xor: 8329 case Instruction::ZExt: 8330 case Instruction::Freeze: 8331 return true; 8332 } 8333 return false; 8334 }; 8335 8336 if (!IsVectorizableOpcode(I->getOpcode())) 8337 return nullptr; 8338 8339 // Success: widen this instruction. 8340 return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end())); 8341 } 8342 8343 void VPRecipeBuilder::fixHeaderPhis() { 8344 BasicBlock *OrigLatch = OrigLoop->getLoopLatch(); 8345 for (VPHeaderPHIRecipe *R : PhisToFix) { 8346 auto *PN = cast<PHINode>(R->getUnderlyingValue()); 8347 VPRecipeBase *IncR = 8348 getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch))); 8349 R->addOperand(IncR->getVPSingleValue()); 8350 } 8351 } 8352 8353 VPBasicBlock *VPRecipeBuilder::handleReplication( 8354 Instruction *I, VFRange &Range, VPBasicBlock *VPBB, 8355 VPlanPtr &Plan) { 8356 bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange( 8357 [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); }, 8358 Range); 8359 8360 bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange( 8361 [&](ElementCount VF) { return CM.isPredicatedInst(I, VF); }, 8362 Range); 8363 8364 // Even if the instruction is not marked as uniform, there are certain 8365 // intrinsic calls that can be effectively treated as such, so we check for 8366 // them here. Conservatively, we only do this for scalable vectors, since 8367 // for fixed-width VFs we can always fall back on full scalarization. 8368 if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) { 8369 switch (cast<IntrinsicInst>(I)->getIntrinsicID()) { 8370 case Intrinsic::assume: 8371 case Intrinsic::lifetime_start: 8372 case Intrinsic::lifetime_end: 8373 // For scalable vectors if one of the operands is variant then we still 8374 // want to mark as uniform, which will generate one instruction for just 8375 // the first lane of the vector. We can't scalarize the call in the same 8376 // way as for fixed-width vectors because we don't know how many lanes 8377 // there are. 
8378 // 8379 // The reasons for doing it this way for scalable vectors are: 8380 // 1. For the assume intrinsic generating the instruction for the first 8381 // lane is still be better than not generating any at all. For 8382 // example, the input may be a splat across all lanes. 8383 // 2. For the lifetime start/end intrinsics the pointer operand only 8384 // does anything useful when the input comes from a stack object, 8385 // which suggests it should always be uniform. For non-stack objects 8386 // the effect is to poison the object, which still allows us to 8387 // remove the call. 8388 IsUniform = true; 8389 break; 8390 default: 8391 break; 8392 } 8393 } 8394 8395 auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()), 8396 IsUniform, IsPredicated); 8397 8398 // Find if I uses a predicated instruction. If so, it will use its scalar 8399 // value. Avoid hoisting the insert-element which packs the scalar value into 8400 // a vector value, as that happens iff all users use the vector value. 8401 for (VPValue *Op : Recipe->operands()) { 8402 auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef()); 8403 if (!PredR) 8404 continue; 8405 auto *RepR = 8406 cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef()); 8407 assert(RepR->isPredicated() && 8408 "expected Replicate recipe to be predicated"); 8409 RepR->setAlsoPack(false); 8410 } 8411 8412 // Finalize the recipe for Instr, first if it is not predicated. 8413 if (!IsPredicated) { 8414 LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n"); 8415 setRecipe(I, Recipe); 8416 Plan->addVPValue(I, Recipe); 8417 VPBB->appendRecipe(Recipe); 8418 return VPBB; 8419 } 8420 LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n"); 8421 8422 VPBlockBase *SingleSucc = VPBB->getSingleSuccessor(); 8423 assert(SingleSucc && "VPBB must have a single successor when handling " 8424 "predicated replication."); 8425 VPBlockUtils::disconnectBlocks(VPBB, SingleSucc); 8426 // Record predicated instructions for above packing optimizations. 8427 VPBlockBase *Region = createReplicateRegion(Recipe, Plan); 8428 VPBlockUtils::insertBlockAfter(Region, VPBB); 8429 auto *RegSucc = new VPBasicBlock(); 8430 VPBlockUtils::insertBlockAfter(RegSucc, Region); 8431 VPBlockUtils::connectBlocks(RegSucc, SingleSucc); 8432 return RegSucc; 8433 } 8434 8435 VPRegionBlock * 8436 VPRecipeBuilder::createReplicateRegion(VPReplicateRecipe *PredRecipe, 8437 VPlanPtr &Plan) { 8438 Instruction *Instr = PredRecipe->getUnderlyingInstr(); 8439 // Instructions marked for predication are replicated and placed under an 8440 // if-then construct to prevent side-effects. 8441 // Generate recipes to compute the block mask for this region. 8442 VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan); 8443 8444 // Build the triangular if-then region. 8445 std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str(); 8446 assert(Instr->getParent() && "Predicated instruction not in any basic block"); 8447 auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask); 8448 auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe); 8449 auto *PHIRecipe = Instr->getType()->isVoidTy() 8450 ? 
nullptr 8451 : new VPPredInstPHIRecipe(PredRecipe); 8452 if (PHIRecipe) { 8453 setRecipe(Instr, PHIRecipe); 8454 Plan->addVPValue(Instr, PHIRecipe); 8455 } else { 8456 setRecipe(Instr, PredRecipe); 8457 Plan->addVPValue(Instr, PredRecipe); 8458 } 8459 8460 auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe); 8461 auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe); 8462 VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true); 8463 8464 // Note: first set Entry as region entry and then connect successors starting 8465 // from it in order, to propagate the "parent" of each VPBasicBlock. 8466 VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry); 8467 VPBlockUtils::connectBlocks(Pred, Exiting); 8468 8469 return Region; 8470 } 8471 8472 VPRecipeOrVPValueTy 8473 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr, 8474 ArrayRef<VPValue *> Operands, 8475 VFRange &Range, VPlanPtr &Plan) { 8476 // First, check for specific widening recipes that deal with inductions, Phi 8477 // nodes, calls and memory operations. 8478 VPRecipeBase *Recipe; 8479 if (auto Phi = dyn_cast<PHINode>(Instr)) { 8480 if (Phi->getParent() != OrigLoop->getHeader()) 8481 return tryToBlend(Phi, Operands, Plan); 8482 if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands, *Plan, Range))) 8483 return toVPRecipeResult(Recipe); 8484 8485 VPHeaderPHIRecipe *PhiRecipe = nullptr; 8486 assert((Legal->isReductionVariable(Phi) || 8487 Legal->isFirstOrderRecurrence(Phi)) && 8488 "can only widen reductions and first-order recurrences here"); 8489 VPValue *StartV = Operands[0]; 8490 if (Legal->isReductionVariable(Phi)) { 8491 const RecurrenceDescriptor &RdxDesc = 8492 Legal->getReductionVars().find(Phi)->second; 8493 assert(RdxDesc.getRecurrenceStartValue() == 8494 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())); 8495 PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV, 8496 CM.isInLoopReduction(Phi), 8497 CM.useOrderedReductions(RdxDesc)); 8498 } else { 8499 PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV); 8500 } 8501 8502 // Record the incoming value from the backedge, so we can add the incoming 8503 // value from the backedge after all recipes have been created. 8504 recordRecipeOf(cast<Instruction>( 8505 Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch()))); 8506 PhisToFix.push_back(PhiRecipe); 8507 return toVPRecipeResult(PhiRecipe); 8508 } 8509 8510 if (isa<TruncInst>(Instr) && 8511 (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands, 8512 Range, *Plan))) 8513 return toVPRecipeResult(Recipe); 8514 8515 // All widen recipes below deal only with VF > 1. 
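// If the range starts at a scalar VF, clamp the range to scalar VFs and bail
// out; the caller then falls back to replicating the instruction instead.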
8516 if (LoopVectorizationPlanner::getDecisionAndClampRange( 8517 [&](ElementCount VF) { return VF.isScalar(); }, Range)) 8518 return nullptr; 8519 8520 if (auto *CI = dyn_cast<CallInst>(Instr)) 8521 return toVPRecipeResult(tryToWidenCall(CI, Operands, Range)); 8522 8523 if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr)) 8524 return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan)); 8525 8526 if (!shouldWiden(Instr, Range)) 8527 return nullptr; 8528 8529 if (auto GEP = dyn_cast<GetElementPtrInst>(Instr)) 8530 return toVPRecipeResult(new VPWidenGEPRecipe( 8531 GEP, make_range(Operands.begin(), Operands.end()), OrigLoop)); 8532 8533 if (auto *SI = dyn_cast<SelectInst>(Instr)) { 8534 bool InvariantCond = 8535 PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop); 8536 return toVPRecipeResult(new VPWidenSelectRecipe( 8537 *SI, make_range(Operands.begin(), Operands.end()), InvariantCond)); 8538 } 8539 8540 return toVPRecipeResult(tryToWiden(Instr, Operands)); 8541 } 8542 8543 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, 8544 ElementCount MaxVF) { 8545 assert(OrigLoop->isInnermost() && "Inner loop expected."); 8546 8547 // Add assume instructions we need to drop to DeadInstructions, to prevent 8548 // them from being added to the VPlan. 8549 // TODO: We only need to drop assumes in blocks that get flattened. If the 8550 // control flow is preserved, we should keep them. 8551 SmallPtrSet<Instruction *, 4> DeadInstructions; 8552 auto &ConditionalAssumes = Legal->getConditionalAssumes(); 8553 DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end()); 8554 8555 MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter(); 8556 // Dead instructions do not need sinking. Remove them from SinkAfter. 8557 for (Instruction *I : DeadInstructions) 8558 SinkAfter.erase(I); 8559 8560 // Cannot sink instructions after dead instructions (there won't be any 8561 // recipes for them). Instead, find the first non-dead previous instruction. 8562 for (auto &P : Legal->getSinkAfter()) { 8563 Instruction *SinkTarget = P.second; 8564 Instruction *FirstInst = &*SinkTarget->getParent()->begin(); 8565 (void)FirstInst; 8566 while (DeadInstructions.contains(SinkTarget)) { 8567 assert( 8568 SinkTarget != FirstInst && 8569 "Must find a live instruction (at least the one feeding the " 8570 "first-order recurrence PHI) before reaching beginning of the block"); 8571 SinkTarget = SinkTarget->getPrevNode(); 8572 assert(SinkTarget != P.first && 8573 "sink source equals target, no sinking required"); 8574 } 8575 P.second = SinkTarget; 8576 } 8577 8578 auto MaxVFPlusOne = MaxVF.getWithIncrement(1); 8579 for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) { 8580 VFRange SubRange = {VF, MaxVFPlusOne}; 8581 VPlans.push_back( 8582 buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter)); 8583 VF = SubRange.End; 8584 } 8585 } 8586 8587 // Add the necessary canonical IV and branch recipes required to control the 8588 // loop. 8589 static void addCanonicalIVRecipes(VPlan &Plan, Type *IdxTy, DebugLoc DL, 8590 bool HasNUW, 8591 bool UseLaneMaskForLoopControlFlow) { 8592 Value *StartIdx = ConstantInt::get(IdxTy, 0); 8593 auto *StartV = Plan.getOrAddVPValue(StartIdx); 8594 8595 // Add a VPCanonicalIVPHIRecipe starting at 0 to the header.
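// The canonical IV phi lives in the region's header block and is incremented
// by VF * UF in the exiting (latch) block. Depending on
// UseLaneMaskForLoopControlFlow, the latch is terminated either by a
// BranchOnCount or by a BranchOnCond over the negated active lane mask.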
8596 auto *CanonicalIVPHI = new VPCanonicalIVPHIRecipe(StartV, DL); 8597 VPRegionBlock *TopRegion = Plan.getVectorLoopRegion(); 8598 VPBasicBlock *Header = TopRegion->getEntryBasicBlock(); 8599 Header->insert(CanonicalIVPHI, Header->begin()); 8600 8601 // Add a CanonicalIVIncrement{NUW} VPInstruction to increment the scalar 8602 // IV by VF * UF. 8603 auto *CanonicalIVIncrement = 8604 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementNUW 8605 : VPInstruction::CanonicalIVIncrement, 8606 {CanonicalIVPHI}, DL, "index.next"); 8607 CanonicalIVPHI->addOperand(CanonicalIVIncrement); 8608 8609 VPBasicBlock *EB = TopRegion->getExitingBasicBlock(); 8610 EB->appendRecipe(CanonicalIVIncrement); 8611 8612 if (UseLaneMaskForLoopControlFlow) { 8613 // Create the active lane mask instruction in the vplan preheader. 8614 VPBasicBlock *Preheader = Plan.getEntry()->getEntryBasicBlock(); 8615 8616 // We can't use StartV directly in the ActiveLaneMask VPInstruction, since 8617 // we have to take unrolling into account. Each part needs to start at 8618 // Part * VF 8619 auto *CanonicalIVIncrementParts = 8620 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW 8621 : VPInstruction::CanonicalIVIncrementForPart, 8622 {StartV}, DL, "index.part.next"); 8623 Preheader->appendRecipe(CanonicalIVIncrementParts); 8624 8625 // Create the ActiveLaneMask instruction using the correct start values. 8626 VPValue *TC = Plan.getOrCreateTripCount(); 8627 auto *EntryALM = new VPInstruction(VPInstruction::ActiveLaneMask, 8628 {CanonicalIVIncrementParts, TC}, DL, 8629 "active.lane.mask.entry"); 8630 Preheader->appendRecipe(EntryALM); 8631 8632 // Now create the ActiveLaneMaskPhi recipe in the main loop using the 8633 // preheader ActiveLaneMask instruction. 8634 auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc()); 8635 Header->insert(LaneMaskPhi, Header->getFirstNonPhi()); 8636 8637 // Create the active lane mask for the next iteration of the loop. 8638 CanonicalIVIncrementParts = 8639 new VPInstruction(HasNUW ? VPInstruction::CanonicalIVIncrementForPartNUW 8640 : VPInstruction::CanonicalIVIncrementForPart, 8641 {CanonicalIVIncrement}, DL); 8642 EB->appendRecipe(CanonicalIVIncrementParts); 8643 8644 auto *ALM = new VPInstruction(VPInstruction::ActiveLaneMask, 8645 {CanonicalIVIncrementParts, TC}, DL, 8646 "active.lane.mask.next"); 8647 EB->appendRecipe(ALM); 8648 LaneMaskPhi->addOperand(ALM); 8649 8650 // We have to invert the mask here because a true condition means jumping 8651 // to the exit block. 8652 auto *NotMask = new VPInstruction(VPInstruction::Not, ALM, DL); 8653 EB->appendRecipe(NotMask); 8654 8655 VPInstruction *BranchBack = 8656 new VPInstruction(VPInstruction::BranchOnCond, {NotMask}, DL); 8657 EB->appendRecipe(BranchBack); 8658 } else { 8659 // Add the BranchOnCount VPInstruction to the latch. 8660 VPInstruction *BranchBack = new VPInstruction( 8661 VPInstruction::BranchOnCount, 8662 {CanonicalIVIncrement, &Plan.getVectorTripCount()}, DL); 8663 EB->appendRecipe(BranchBack); 8664 } 8665 } 8666 8667 // Add exit values to \p Plan. VPLiveOuts are added for each LCSSA phi in the 8668 // original exit block. 8669 static void addUsersInExitBlock(VPBasicBlock *HeaderVPBB, 8670 VPBasicBlock *MiddleVPBB, Loop *OrigLoop, 8671 VPlan &Plan) { 8672 BasicBlock *ExitBB = OrigLoop->getUniqueExitBlock(); 8673 BasicBlock *ExitingBB = OrigLoop->getExitingBlock(); 8674 // Only handle single-exit loops with unique exit blocks for now. 
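// Loops with multiple exiting blocks, multiple exit blocks, or an exit block
// with more than one predecessor are skipped below, so no live-outs are
// modeled for them.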
8675 if (!ExitBB || !ExitBB->getSinglePredecessor() || !ExitingBB) 8676 return; 8677 8678 // Introduce VPUsers modeling the exit values. 8679 for (PHINode &ExitPhi : ExitBB->phis()) { 8680 Value *IncomingValue = 8681 ExitPhi.getIncomingValueForBlock(ExitingBB); 8682 VPValue *V = Plan.getOrAddVPValue(IncomingValue, true); 8683 Plan.addLiveOut(&ExitPhi, V); 8684 } 8685 } 8686 8687 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes( 8688 VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions, 8689 const MapVector<Instruction *, Instruction *> &SinkAfter) { 8690 8691 SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups; 8692 8693 VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder); 8694 8695 // --------------------------------------------------------------------------- 8696 // Pre-construction: record ingredients whose recipes we'll need to further 8697 // process after constructing the initial VPlan. 8698 // --------------------------------------------------------------------------- 8699 8700 // Mark instructions we'll need to sink later and their targets as 8701 // ingredients whose recipe we'll need to record. 8702 for (auto &Entry : SinkAfter) { 8703 RecipeBuilder.recordRecipeOf(Entry.first); 8704 RecipeBuilder.recordRecipeOf(Entry.second); 8705 } 8706 for (auto &Reduction : CM.getInLoopReductionChains()) { 8707 PHINode *Phi = Reduction.first; 8708 RecurKind Kind = 8709 Legal->getReductionVars().find(Phi)->second.getRecurrenceKind(); 8710 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 8711 8712 RecipeBuilder.recordRecipeOf(Phi); 8713 for (auto &R : ReductionOperations) { 8714 RecipeBuilder.recordRecipeOf(R); 8715 // For min/max reductions, where we have a pair of icmp/select, we also 8716 // need to record the ICmp recipe, so it can be removed later. 8717 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 8718 "Only min/max recurrences allowed for inloop reductions"); 8719 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) 8720 RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0))); 8721 } 8722 } 8723 8724 // For each interleave group which is relevant for this (possibly trimmed) 8725 // Range, add it to the set of groups to be later applied to the VPlan and add 8726 // placeholders for its members' Recipes which we'll be replacing with a 8727 // single VPInterleaveRecipe. 8728 for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) { 8729 auto applyIG = [IG, this](ElementCount VF) -> bool { 8730 return (VF.isVector() && // Query is illegal for VF == 1 8731 CM.getWideningDecision(IG->getInsertPos(), VF) == 8732 LoopVectorizationCostModel::CM_Interleave); 8733 }; 8734 if (!getDecisionAndClampRange(applyIG, Range)) 8735 continue; 8736 InterleaveGroups.insert(IG); 8737 for (unsigned i = 0; i < IG->getFactor(); i++) 8738 if (Instruction *Member = IG->getMember(i)) 8739 RecipeBuilder.recordRecipeOf(Member); 8740 }; 8741 8742 // --------------------------------------------------------------------------- 8743 // Build initial VPlan: Scan the body of the loop in a topological order to 8744 // visit each basic block after having visited its predecessor basic blocks. 8745 // --------------------------------------------------------------------------- 8746 8747 // Create initial VPlan skeleton, starting with a block for the pre-header, 8748 // followed by a region for the vector loop, followed by the middle block. 
The 8749 // skeleton vector loop region contains a header and latch block. 8750 VPBasicBlock *Preheader = new VPBasicBlock("vector.ph"); 8751 auto Plan = std::make_unique<VPlan>(Preheader); 8752 8753 VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body"); 8754 VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch"); 8755 VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB); 8756 auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop"); 8757 VPBlockUtils::insertBlockAfter(TopRegion, Preheader); 8758 VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block"); 8759 VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion); 8760 8761 Instruction *DLInst = 8762 getDebugLocFromInstOrOperands(Legal->getPrimaryInduction()); 8763 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), 8764 DLInst ? DLInst->getDebugLoc() : DebugLoc(), 8765 !CM.foldTailByMasking(), 8766 CM.useActiveLaneMaskForControlFlow()); 8767 8768 // Scan the body of the loop in a topological order to visit each basic block 8769 // after having visited its predecessor basic blocks. 8770 LoopBlocksDFS DFS(OrigLoop); 8771 DFS.perform(LI); 8772 8773 VPBasicBlock *VPBB = HeaderVPBB; 8774 SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove; 8775 for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) { 8776 // Relevant instructions from basic block BB will be grouped into VPRecipe 8777 // ingredients and fill a new VPBasicBlock. 8778 unsigned VPBBsForBB = 0; 8779 if (VPBB != HeaderVPBB) 8780 VPBB->setName(BB->getName()); 8781 Builder.setInsertPoint(VPBB); 8782 8783 // Introduce each ingredient into VPlan. 8784 // TODO: Model and preserve debug intrinsics in VPlan. 8785 for (Instruction &I : BB->instructionsWithoutDebug()) { 8786 Instruction *Instr = &I; 8787 8788 // First filter out irrelevant instructions, to ensure no recipes are 8789 // built for them. 8790 if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr)) 8791 continue; 8792 8793 SmallVector<VPValue *, 4> Operands; 8794 auto *Phi = dyn_cast<PHINode>(Instr); 8795 if (Phi && Phi->getParent() == OrigLoop->getHeader()) { 8796 Operands.push_back(Plan->getOrAddVPValue( 8797 Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()))); 8798 } else { 8799 auto OpRange = Plan->mapToVPValues(Instr->operands()); 8800 Operands = {OpRange.begin(), OpRange.end()}; 8801 } 8802 8803 // Invariant stores inside loop will be deleted and a single store 8804 // with the final reduction value will be added to the exit block 8805 StoreInst *SI; 8806 if ((SI = dyn_cast<StoreInst>(&I)) && 8807 Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) 8808 continue; 8809 8810 if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe( 8811 Instr, Operands, Range, Plan)) { 8812 // If Instr can be simplified to an existing VPValue, use it. 8813 if (RecipeOrValue.is<VPValue *>()) { 8814 auto *VPV = RecipeOrValue.get<VPValue *>(); 8815 Plan->addVPValue(Instr, VPV); 8816 // If the re-used value is a recipe, register the recipe for the 8817 // instruction, in case the recipe for Instr needs to be recorded. 8818 if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef())) 8819 RecipeBuilder.setRecipe(Instr, R); 8820 continue; 8821 } 8822 // Otherwise, add the new recipe. 
8823 VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>(); 8824 for (auto *Def : Recipe->definedValues()) { 8825 auto *UV = Def->getUnderlyingValue(); 8826 Plan->addVPValue(UV, Def); 8827 } 8828 8829 if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) && 8830 HeaderVPBB->getFirstNonPhi() != VPBB->end()) { 8831 // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section 8832 // of the header block. That can happen for truncates of induction 8833 // variables. Those recipes are moved to the phi section of the header 8834 // block after applying SinkAfter, which relies on the original 8835 // position of the trunc. 8836 assert(isa<TruncInst>(Instr)); 8837 InductionsToMove.push_back( 8838 cast<VPWidenIntOrFpInductionRecipe>(Recipe)); 8839 } 8840 RecipeBuilder.setRecipe(Instr, Recipe); 8841 VPBB->appendRecipe(Recipe); 8842 continue; 8843 } 8844 8845 // Otherwise, if all widening options failed, Instruction is to be 8846 // replicated. This may create a successor for VPBB. 8847 VPBasicBlock *NextVPBB = 8848 RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan); 8849 if (NextVPBB != VPBB) { 8850 VPBB = NextVPBB; 8851 VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++) 8852 : ""); 8853 } 8854 } 8855 8856 VPBlockUtils::insertBlockAfter(new VPBasicBlock(), VPBB); 8857 VPBB = cast<VPBasicBlock>(VPBB->getSingleSuccessor()); 8858 } 8859 8860 HeaderVPBB->setName("vector.body"); 8861 8862 // Fold the last, empty block into its predecessor. 8863 VPBB = VPBlockUtils::tryToMergeBlockIntoPredecessor(VPBB); 8864 assert(VPBB && "expected to fold last (empty) block"); 8865 // After here, VPBB should not be used. 8866 VPBB = nullptr; 8867 8868 addUsersInExitBlock(HeaderVPBB, MiddleVPBB, OrigLoop, *Plan); 8869 8870 assert(isa<VPRegionBlock>(Plan->getVectorLoopRegion()) && 8871 !Plan->getVectorLoopRegion()->getEntryBasicBlock()->empty() && 8872 "entry block must be set to a VPRegionBlock having a non-empty entry " 8873 "VPBasicBlock"); 8874 RecipeBuilder.fixHeaderPhis(); 8875 8876 // --------------------------------------------------------------------------- 8877 // Transform initial VPlan: Apply previously taken decisions, in order, to 8878 // bring the VPlan to its final state. 8879 // --------------------------------------------------------------------------- 8880 8881 // Apply Sink-After legal constraints. 8882 auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * { 8883 auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent()); 8884 if (Region && Region->isReplicator()) { 8885 assert(Region->getNumSuccessors() == 1 && 8886 Region->getNumPredecessors() == 1 && "Expected SESE region!"); 8887 assert(R->getParent()->size() == 1 && 8888 "A recipe in an original replicator region must be the only " 8889 "recipe in its block"); 8890 return Region; 8891 } 8892 return nullptr; 8893 }; 8894 for (auto &Entry : SinkAfter) { 8895 VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first); 8896 VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second); 8897 8898 auto *TargetRegion = GetReplicateRegion(Target); 8899 auto *SinkRegion = GetReplicateRegion(Sink); 8900 if (!SinkRegion) { 8901 // If the sink source is not a replicate region, sink the recipe directly. 8902 if (TargetRegion) { 8903 // The target is in a replication region, make sure to move Sink to 8904 // the block after it, not into the replication region itself. 
8905 VPBasicBlock *NextBlock = 8906 cast<VPBasicBlock>(TargetRegion->getSuccessors().front()); 8907 Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi()); 8908 } else 8909 Sink->moveAfter(Target); 8910 continue; 8911 } 8912 8913 // The sink source is in a replicate region. Unhook the region from the CFG. 8914 auto *SinkPred = SinkRegion->getSinglePredecessor(); 8915 auto *SinkSucc = SinkRegion->getSingleSuccessor(); 8916 VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion); 8917 VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc); 8918 VPBlockUtils::connectBlocks(SinkPred, SinkSucc); 8919 8920 if (TargetRegion) { 8921 // The target recipe is also in a replicate region, move the sink region 8922 // after the target region. 8923 auto *TargetSucc = TargetRegion->getSingleSuccessor(); 8924 VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc); 8925 VPBlockUtils::connectBlocks(TargetRegion, SinkRegion); 8926 VPBlockUtils::connectBlocks(SinkRegion, TargetSucc); 8927 } else { 8928 // The sink source is in a replicate region, we need to move the whole 8929 // replicate region, which should only contain a single recipe in the 8930 // main block. 8931 auto *SplitBlock = 8932 Target->getParent()->splitAt(std::next(Target->getIterator())); 8933 8934 auto *SplitPred = SplitBlock->getSinglePredecessor(); 8935 8936 VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock); 8937 VPBlockUtils::connectBlocks(SplitPred, SinkRegion); 8938 VPBlockUtils::connectBlocks(SinkRegion, SplitBlock); 8939 } 8940 } 8941 8942 VPlanTransforms::removeRedundantCanonicalIVs(*Plan); 8943 VPlanTransforms::removeRedundantInductionCasts(*Plan); 8944 8945 // Now that sink-after is done, move induction recipes for optimized truncates 8946 // to the phi section of the header block. 8947 for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove) 8948 Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi()); 8949 8950 // Adjust the recipes for any inloop reductions. 8951 adjustRecipesForReductions(cast<VPBasicBlock>(TopRegion->getExiting()), Plan, 8952 RecipeBuilder, Range.Start); 8953 8954 // Introduce a recipe to combine the incoming and previous values of a 8955 // first-order recurrence. 8956 for (VPRecipeBase &R : 8957 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 8958 auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R); 8959 if (!RecurPhi) 8960 continue; 8961 8962 VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe(); 8963 VPBasicBlock *InsertBlock = PrevRecipe->getParent(); 8964 auto *Region = GetReplicateRegion(PrevRecipe); 8965 if (Region) 8966 InsertBlock = dyn_cast<VPBasicBlock>(Region->getSingleSuccessor()); 8967 if (!InsertBlock) { 8968 InsertBlock = new VPBasicBlock(Region->getName() + ".succ"); 8969 VPBlockUtils::insertBlockAfter(InsertBlock, Region); 8970 } 8971 if (Region || PrevRecipe->isPhi()) 8972 Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi()); 8973 else 8974 Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator())); 8975 8976 auto *RecurSplice = cast<VPInstruction>( 8977 Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice, 8978 {RecurPhi, RecurPhi->getBackedgeValue()})); 8979 8980 RecurPhi->replaceAllUsesWith(RecurSplice); 8981 // Set the first operand of RecurSplice to RecurPhi again, after replacing 8982 // all users. 
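// replaceAllUsesWith above also rewrote RecurSplice's own first operand (the
// phi) to RecurSplice itself; restore it to the phi here.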
8983 RecurSplice->setOperand(0, RecurPhi); 8984 } 8985 8986 // Interleave memory: for each Interleave Group we marked earlier as relevant 8987 // for this VPlan, replace the Recipes widening its memory instructions with a 8988 // single VPInterleaveRecipe at its insertion point. 8989 for (auto IG : InterleaveGroups) { 8990 auto *Recipe = cast<VPWidenMemoryInstructionRecipe>( 8991 RecipeBuilder.getRecipe(IG->getInsertPos())); 8992 SmallVector<VPValue *, 4> StoredValues; 8993 for (unsigned i = 0; i < IG->getFactor(); ++i) 8994 if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) { 8995 auto *StoreR = 8996 cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI)); 8997 StoredValues.push_back(StoreR->getStoredValue()); 8998 } 8999 9000 auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues, 9001 Recipe->getMask()); 9002 VPIG->insertBefore(Recipe); 9003 unsigned J = 0; 9004 for (unsigned i = 0; i < IG->getFactor(); ++i) 9005 if (Instruction *Member = IG->getMember(i)) { 9006 if (!Member->getType()->isVoidTy()) { 9007 VPValue *OriginalV = Plan->getVPValue(Member); 9008 Plan->removeVPValueFor(Member); 9009 Plan->addVPValue(Member, VPIG->getVPValue(J)); 9010 OriginalV->replaceAllUsesWith(VPIG->getVPValue(J)); 9011 J++; 9012 } 9013 RecipeBuilder.getRecipe(Member)->eraseFromParent(); 9014 } 9015 } 9016 9017 std::string PlanName; 9018 raw_string_ostream RSO(PlanName); 9019 ElementCount VF = Range.Start; 9020 Plan->addVF(VF); 9021 RSO << "Initial VPlan for VF={" << VF; 9022 for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) { 9023 Plan->addVF(VF); 9024 RSO << "," << VF; 9025 } 9026 RSO << "},UF>=1"; 9027 RSO.flush(); 9028 Plan->setName(PlanName); 9029 9030 // From this point onwards, VPlan-to-VPlan transformations may change the plan 9031 // in ways that accessing values using original IR values is incorrect. 9032 Plan->disableValue2VPValue(); 9033 9034 VPlanTransforms::optimizeInductions(*Plan, *PSE.getSE()); 9035 VPlanTransforms::sinkScalarOperands(*Plan); 9036 VPlanTransforms::removeDeadRecipes(*Plan); 9037 VPlanTransforms::mergeReplicateRegions(*Plan); 9038 VPlanTransforms::removeRedundantExpandSCEVRecipes(*Plan); 9039 9040 // Fold Exit block into its predecessor if possible. 9041 // TODO: Fold block earlier once all VPlan transforms properly maintain a 9042 // VPBasicBlock as exit. 9043 VPBlockUtils::tryToMergeBlockIntoPredecessor(TopRegion->getExiting()); 9044 9045 assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid"); 9046 return Plan; 9047 } 9048 9049 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) { 9050 // Outer loop handling: They may require CFG and instruction level 9051 // transformations before even evaluating whether vectorization is profitable. 9052 // Since we cannot modify the incoming IR, we need to build VPlan upfront in 9053 // the vectorization pipeline. 
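// This function is only used on the VPlan-native path for outer loops;
// innermost loops are handled by buildVPlanWithVPRecipes above.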
9054 assert(!OrigLoop->isInnermost()); 9055 assert(EnableVPlanNativePath && "VPlan-native path is not enabled."); 9056 9057 // Create new empty VPlan 9058 auto Plan = std::make_unique<VPlan>(); 9059 9060 // Build hierarchical CFG 9061 VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan); 9062 HCFGBuilder.buildHierarchicalCFG(); 9063 9064 for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End); 9065 VF *= 2) 9066 Plan->addVF(VF); 9067 9068 SmallPtrSet<Instruction *, 1> DeadInstructions; 9069 VPlanTransforms::VPInstructionsToVPRecipes( 9070 OrigLoop, Plan, 9071 [this](PHINode *P) { return Legal->getIntOrFpInductionDescriptor(P); }, 9072 DeadInstructions, *PSE.getSE()); 9073 9074 // Remove the existing terminator of the exiting block of the top-most region. 9075 // A BranchOnCount will be added instead when adding the canonical IV recipes. 9076 auto *Term = 9077 Plan->getVectorLoopRegion()->getExitingBasicBlock()->getTerminator(); 9078 Term->eraseFromParent(); 9079 9080 addCanonicalIVRecipes(*Plan, Legal->getWidestInductionType(), DebugLoc(), 9081 true, CM.useActiveLaneMaskForControlFlow()); 9082 return Plan; 9083 } 9084 9085 // Adjust the recipes for reductions. For in-loop reductions the chain of 9086 // instructions leading from the loop exit instr to the phi needs to be converted 9087 // to reductions, with one operand being vector and the other being the scalar 9088 // reduction chain. For other reductions, a select is introduced between the phi 9089 // and live-out recipes when folding the tail. 9090 void LoopVectorizationPlanner::adjustRecipesForReductions( 9091 VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder, 9092 ElementCount MinVF) { 9093 for (auto &Reduction : CM.getInLoopReductionChains()) { 9094 PHINode *Phi = Reduction.first; 9095 const RecurrenceDescriptor &RdxDesc = 9096 Legal->getReductionVars().find(Phi)->second; 9097 const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second; 9098 9099 if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc)) 9100 continue; 9101 9102 // ReductionOperations are ordered top-down from the phi's use to the 9103 // LoopExitValue. We keep track of the previous item (the Chain) to tell 9104 // which of the two operands will remain scalar and which will be reduced. 9105 // For minmax the chain will be the select instructions. 9106 Instruction *Chain = Phi; 9107 for (Instruction *R : ReductionOperations) { 9108 VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R); 9109 RecurKind Kind = RdxDesc.getRecurrenceKind(); 9110 9111 VPValue *ChainOp = Plan->getVPValue(Chain); 9112 unsigned FirstOpId; 9113 assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) && 9114 "Only min/max recurrences allowed for inloop reductions"); 9115 // Recognize a call to the llvm.fmuladd intrinsic. 9116 bool IsFMulAdd = (Kind == RecurKind::FMulAdd); 9117 assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) && 9118 "Expected instruction to be a call to the llvm.fmuladd intrinsic"); 9119 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9120 assert(isa<VPWidenSelectRecipe>(WidenRecipe) && 9121 "Expected to replace a VPWidenSelectSC"); 9122 FirstOpId = 1; 9123 } else { 9124 assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) || 9125 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) && 9126 "Expected to replace a VPWidenSC"); 9127 FirstOpId = 0; 9128 } 9129 unsigned VecOpId = 9130 R->getOperand(FirstOpId) == Chain ?
FirstOpId + 1 : FirstOpId; 9131 VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId)); 9132 9133 auto *CondOp = CM.blockNeedsPredicationForAnyReason(R->getParent()) 9134 ? RecipeBuilder.createBlockInMask(R->getParent(), Plan) 9135 : nullptr; 9136 9137 if (IsFMulAdd) { 9138 // If the instruction is a call to the llvm.fmuladd intrinsic then we 9139 // need to create an fmul recipe to use as the vector operand for the 9140 // fadd reduction. 9141 VPInstruction *FMulRecipe = new VPInstruction( 9142 Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))}); 9143 FMulRecipe->setFastMathFlags(R->getFastMathFlags()); 9144 WidenRecipe->getParent()->insert(FMulRecipe, 9145 WidenRecipe->getIterator()); 9146 VecOp = FMulRecipe; 9147 } 9148 VPReductionRecipe *RedRecipe = 9149 new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI); 9150 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9151 Plan->removeVPValueFor(R); 9152 Plan->addVPValue(R, RedRecipe); 9153 // Append the recipe to the end of the VPBasicBlock because we need to 9154 // ensure that it comes after all of its inputs, including CondOp. 9155 WidenRecipe->getParent()->appendRecipe(RedRecipe); 9156 WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe); 9157 WidenRecipe->eraseFromParent(); 9158 9159 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9160 VPRecipeBase *CompareRecipe = 9161 RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0))); 9162 assert(isa<VPWidenRecipe>(CompareRecipe) && 9163 "Expected to replace a VPWidenSC"); 9164 assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 && 9165 "Expected no remaining users"); 9166 CompareRecipe->eraseFromParent(); 9167 } 9168 Chain = R; 9169 } 9170 } 9171 9172 // If tail is folded by masking, introduce selects between the phi 9173 // and the live-out instruction of each reduction, at the beginning of the 9174 // dedicated latch block.
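// Each select keeps the value of the reduction phi for lanes where the mask
// is false and takes the newly computed value otherwise, so masked-off
// (remainder) lanes do not contribute to the final reduction result.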
9175 if (CM.foldTailByMasking()) { 9176 Builder.setInsertPoint(LatchVPBB, LatchVPBB->begin()); 9177 for (VPRecipeBase &R : 9178 Plan->getVectorLoopRegion()->getEntryBasicBlock()->phis()) { 9179 VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R); 9180 if (!PhiR || PhiR->isInLoop()) 9181 continue; 9182 VPValue *Cond = 9183 RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan); 9184 VPValue *Red = PhiR->getBackedgeValue(); 9185 assert(cast<VPRecipeBase>(Red->getDef())->getParent() != LatchVPBB && 9186 "reduction recipe must be defined before latch"); 9187 Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR}); 9188 } 9189 } 9190 } 9191 9192 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 9193 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent, 9194 VPSlotTracker &SlotTracker) const { 9195 O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at "; 9196 IG->getInsertPos()->printAsOperand(O, false); 9197 O << ", "; 9198 getAddr()->printAsOperand(O, SlotTracker); 9199 VPValue *Mask = getMask(); 9200 if (Mask) { 9201 O << ", "; 9202 Mask->printAsOperand(O, SlotTracker); 9203 } 9204 9205 unsigned OpIdx = 0; 9206 for (unsigned i = 0; i < IG->getFactor(); ++i) { 9207 if (!IG->getMember(i)) 9208 continue; 9209 if (getNumStoreOperands() > 0) { 9210 O << "\n" << Indent << " store "; 9211 getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker); 9212 O << " to index " << i; 9213 } else { 9214 O << "\n" << Indent << " "; 9215 getVPValue(OpIdx)->printAsOperand(O, SlotTracker); 9216 O << " = load from index " << i; 9217 } 9218 ++OpIdx; 9219 } 9220 } 9221 #endif 9222 9223 void VPWidenCallRecipe::execute(VPTransformState &State) { 9224 State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this, 9225 *this, State); 9226 } 9227 9228 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { 9229 assert(!State.Instance && "Int or FP induction being replicated."); 9230 9231 Value *Start = getStartValue()->getLiveInIRValue(); 9232 const InductionDescriptor &ID = getInductionDescriptor(); 9233 TruncInst *Trunc = getTruncInst(); 9234 IRBuilderBase &Builder = State.Builder; 9235 assert(IV->getType() == ID.getStartValue()->getType() && "Types must match"); 9236 assert(State.VF.isVector() && "must have vector VF"); 9237 9238 // The value from the original loop to which we are mapping the new induction 9239 // variable. 9240 Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV; 9241 9242 // Fast-math-flags propagate from the original induction instruction. 9243 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 9244 if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp())) 9245 Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags()); 9246 9247 // Now do the actual transformations, and start with fetching the step value. 
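// The widened IV is formed in the preheader as splat(Start) stepped by Step
// (i.e. <Start, Start + Step, Start + 2*Step, ...>); inside the loop each
// further unrolled part is obtained by adding SplatVF (the splat of
// RuntimeVF * Step) to the previous part.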
9248 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9249 9250 assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) && 9251 "Expected either an induction phi-node or a truncate of it!"); 9252 9253 // Construct the initial value of the vector IV in the vector loop preheader 9254 auto CurrIP = Builder.saveIP(); 9255 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9256 Builder.SetInsertPoint(VectorPH->getTerminator()); 9257 if (isa<TruncInst>(EntryVal)) { 9258 assert(Start->getType()->isIntegerTy() && 9259 "Truncation requires an integer type"); 9260 auto *TruncType = cast<IntegerType>(EntryVal->getType()); 9261 Step = Builder.CreateTrunc(Step, TruncType); 9262 Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType); 9263 } 9264 9265 Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0); 9266 Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start); 9267 Value *SteppedStart = getStepVector( 9268 SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder); 9269 9270 // We create vector phi nodes for both integer and floating-point induction 9271 // variables. Here, we determine the kind of arithmetic we will perform. 9272 Instruction::BinaryOps AddOp; 9273 Instruction::BinaryOps MulOp; 9274 if (Step->getType()->isIntegerTy()) { 9275 AddOp = Instruction::Add; 9276 MulOp = Instruction::Mul; 9277 } else { 9278 AddOp = ID.getInductionOpcode(); 9279 MulOp = Instruction::FMul; 9280 } 9281 9282 // Multiply the vectorization factor by the step using integer or 9283 // floating-point arithmetic as appropriate. 9284 Type *StepType = Step->getType(); 9285 Value *RuntimeVF; 9286 if (Step->getType()->isFloatingPointTy()) 9287 RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, State.VF); 9288 else 9289 RuntimeVF = getRuntimeVF(Builder, StepType, State.VF); 9290 Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF); 9291 9292 // Create a vector splat to use in the induction update. 9293 // 9294 // FIXME: If the step is non-constant, we create the vector splat with 9295 // IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't 9296 // handle a constant vector splat. 9297 Value *SplatVF = isa<Constant>(Mul) 9298 ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul)) 9299 : Builder.CreateVectorSplat(State.VF, Mul); 9300 Builder.restoreIP(CurrIP); 9301 9302 // We may need to add the step a number of times, depending on the unroll 9303 // factor. The last of those goes into the PHI. 9304 PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind", 9305 &*State.CFG.PrevBB->getFirstInsertionPt()); 9306 VecInd->setDebugLoc(EntryVal->getDebugLoc()); 9307 Instruction *LastInduction = VecInd; 9308 for (unsigned Part = 0; Part < State.UF; ++Part) { 9309 State.set(this, LastInduction, Part); 9310 9311 if (isa<TruncInst>(EntryVal)) 9312 State.addMetadata(LastInduction, EntryVal); 9313 9314 LastInduction = cast<Instruction>( 9315 Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add")); 9316 LastInduction->setDebugLoc(EntryVal->getDebugLoc()); 9317 } 9318 9319 LastInduction->setName("vec.ind.next"); 9320 VecInd->addIncoming(SteppedStart, VectorPH); 9321 // Add induction update using an incorrect block temporarily. The phi node 9322 // will be fixed after VPlan execution. Note that at this point the latch 9323 // block cannot be used, as it does not exist yet. 9324 // TODO: Model increment value in VPlan, by turning the recipe into a 9325 // multi-def and a subclass of VPHeaderPHIRecipe. 
9326 VecInd->addIncoming(LastInduction, VectorPH); 9327 } 9328 9329 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) { 9330 assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction && 9331 "Not a pointer induction according to InductionDescriptor!"); 9332 assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() && 9333 "Unexpected type."); 9334 9335 auto *IVR = getParent()->getPlan()->getCanonicalIV(); 9336 PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, 0)); 9337 9338 if (onlyScalarsGenerated(State.VF)) { 9339 // This is the normalized GEP that starts counting at zero. 9340 Value *PtrInd = State.Builder.CreateSExtOrTrunc( 9341 CanonicalIV, IndDesc.getStep()->getType()); 9342 // Determine the number of scalars we need to generate for each unroll 9343 // iteration. If the instruction is uniform, we only need to generate the 9344 // first lane. Otherwise, we generate all VF values. 9345 bool IsUniform = vputils::onlyFirstLaneUsed(this); 9346 assert((IsUniform || !State.VF.isScalable()) && 9347 "Cannot scalarize a scalable VF"); 9348 unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue(); 9349 9350 for (unsigned Part = 0; Part < State.UF; ++Part) { 9351 Value *PartStart = 9352 createStepForVF(State.Builder, PtrInd->getType(), State.VF, Part); 9353 9354 for (unsigned Lane = 0; Lane < Lanes; ++Lane) { 9355 Value *Idx = State.Builder.CreateAdd( 9356 PartStart, ConstantInt::get(PtrInd->getType(), Lane)); 9357 Value *GlobalIdx = State.Builder.CreateAdd(PtrInd, Idx); 9358 9359 Value *Step = CreateStepValue(IndDesc.getStep(), SE, 9360 State.CFG.PrevBB->getTerminator()); 9361 Value *SclrGep = emitTransformedIndex( 9362 State.Builder, GlobalIdx, IndDesc.getStartValue(), Step, IndDesc); 9363 SclrGep->setName("next.gep"); 9364 State.set(this, SclrGep, VPIteration(Part, Lane)); 9365 } 9366 } 9367 return; 9368 } 9369 9370 assert(isa<SCEVConstant>(IndDesc.getStep()) && 9371 "Induction step not a SCEV constant!"); 9372 Type *PhiType = IndDesc.getStep()->getType(); 9373 9374 // Build a pointer phi 9375 Value *ScalarStartValue = getStartValue()->getLiveInIRValue(); 9376 Type *ScStValueType = ScalarStartValue->getType(); 9377 PHINode *NewPointerPhi = 9378 PHINode::Create(ScStValueType, 2, "pointer.phi", CanonicalIV); 9379 9380 BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); 9381 NewPointerPhi->addIncoming(ScalarStartValue, VectorPH); 9382 9383 // A pointer induction, performed by using a gep 9384 const DataLayout &DL = NewPointerPhi->getModule()->getDataLayout(); 9385 Instruction *InductionLoc = &*State.Builder.GetInsertPoint(); 9386 9387 const SCEV *ScalarStep = IndDesc.getStep(); 9388 SCEVExpander Exp(SE, DL, "induction"); 9389 Value *ScalarStepValue = Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc); 9390 Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF); 9391 Value *NumUnrolledElems = 9392 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF)); 9393 Value *InductionGEP = GetElementPtrInst::Create( 9394 IndDesc.getElementType(), NewPointerPhi, 9395 State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind", 9396 InductionLoc); 9397 // Add induction update using an incorrect block temporarily. The phi node 9398 // will be fixed after VPlan execution. Note that at this point the latch 9399 // block cannot be used, as it does not exist yet. 9400 // TODO: Model increment value in VPlan, by turning the recipe into a 9401 // multi-def and a subclass of VPHeaderPHIRecipe. 
9402 NewPointerPhi->addIncoming(InductionGEP, VectorPH); 9403 9404 // Create UF many actual address geps that use the pointer 9405 // phi as base and a vectorized version of the step value 9406 // (<step*0, ..., step*N>) as offset. 9407 for (unsigned Part = 0; Part < State.UF; ++Part) { 9408 Type *VecPhiType = VectorType::get(PhiType, State.VF); 9409 Value *StartOffsetScalar = 9410 State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part)); 9411 Value *StartOffset = 9412 State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar); 9413 // Create a vector of consecutive numbers from zero to VF. 9414 StartOffset = State.Builder.CreateAdd( 9415 StartOffset, State.Builder.CreateStepVector(VecPhiType)); 9416 9417 Value *GEP = State.Builder.CreateGEP( 9418 IndDesc.getElementType(), NewPointerPhi, 9419 State.Builder.CreateMul( 9420 StartOffset, 9421 State.Builder.CreateVectorSplat(State.VF, ScalarStepValue), 9422 "vector.gep")); 9423 State.set(this, GEP, Part); 9424 } 9425 } 9426 9427 void VPScalarIVStepsRecipe::execute(VPTransformState &State) { 9428 assert(!State.Instance && "VPScalarIVStepsRecipe being replicated."); 9429 9430 // Fast-math-flags propagate from the original induction instruction. 9431 IRBuilder<>::FastMathFlagGuard FMFG(State.Builder); 9432 if (IndDesc.getInductionBinOp() && 9433 isa<FPMathOperator>(IndDesc.getInductionBinOp())) 9434 State.Builder.setFastMathFlags( 9435 IndDesc.getInductionBinOp()->getFastMathFlags()); 9436 9437 Value *Step = State.get(getStepValue(), VPIteration(0, 0)); 9438 auto CreateScalarIV = [&](Value *&Step) -> Value * { 9439 Value *ScalarIV = State.get(getCanonicalIV(), VPIteration(0, 0)); 9440 auto *CanonicalIV = State.get(getParent()->getPlan()->getCanonicalIV(), 0); 9441 if (!isCanonical() || CanonicalIV->getType() != Ty) { 9442 ScalarIV = 9443 Ty->isIntegerTy() 9444 ? State.Builder.CreateSExtOrTrunc(ScalarIV, Ty) 9445 : State.Builder.CreateCast(Instruction::SIToFP, ScalarIV, Ty); 9446 ScalarIV = emitTransformedIndex(State.Builder, ScalarIV, 9447 getStartValue()->getLiveInIRValue(), Step, 9448 IndDesc); 9449 ScalarIV->setName("offset.idx"); 9450 } 9451 if (TruncToTy) { 9452 assert(Step->getType()->isIntegerTy() && 9453 "Truncation requires an integer step"); 9454 ScalarIV = State.Builder.CreateTrunc(ScalarIV, TruncToTy); 9455 Step = State.Builder.CreateTrunc(Step, TruncToTy); 9456 } 9457 return ScalarIV; 9458 }; 9459 9460 Value *ScalarIV = CreateScalarIV(Step); 9461 if (State.VF.isVector()) { 9462 buildScalarSteps(ScalarIV, Step, IndDesc, this, State); 9463 return; 9464 } 9465 9466 for (unsigned Part = 0; Part < State.UF; ++Part) { 9467 assert(!State.VF.isScalable() && "scalable vectors not yet supported."); 9468 Value *EntryPart; 9469 if (Step->getType()->isFloatingPointTy()) { 9470 Value *StartIdx = 9471 getRuntimeVFAsFloat(State.Builder, Step->getType(), State.VF * Part); 9472 // Floating-point operations inherit FMF via the builder's flags. 
9473 Value *MulOp = State.Builder.CreateFMul(StartIdx, Step); 9474 EntryPart = State.Builder.CreateBinOp(IndDesc.getInductionOpcode(), 9475 ScalarIV, MulOp); 9476 } else { 9477 Value *StartIdx = 9478 getRuntimeVF(State.Builder, Step->getType(), State.VF * Part); 9479 EntryPart = State.Builder.CreateAdd( 9480 ScalarIV, State.Builder.CreateMul(StartIdx, Step), "induction"); 9481 } 9482 State.set(this, EntryPart, Part); 9483 } 9484 } 9485 9486 void VPInterleaveRecipe::execute(VPTransformState &State) { 9487 assert(!State.Instance && "Interleave group being replicated."); 9488 State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(), 9489 getStoredValues(), getMask()); 9490 } 9491 9492 void VPReductionRecipe::execute(VPTransformState &State) { 9493 assert(!State.Instance && "Reduction being replicated."); 9494 Value *PrevInChain = State.get(getChainOp(), 0); 9495 RecurKind Kind = RdxDesc->getRecurrenceKind(); 9496 bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc); 9497 // Propagate the fast-math flags carried by the underlying instruction. 9498 IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder); 9499 State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags()); 9500 for (unsigned Part = 0; Part < State.UF; ++Part) { 9501 Value *NewVecOp = State.get(getVecOp(), Part); 9502 if (VPValue *Cond = getCondOp()) { 9503 Value *NewCond = State.get(Cond, Part); 9504 VectorType *VecTy = cast<VectorType>(NewVecOp->getType()); 9505 Value *Iden = RdxDesc->getRecurrenceIdentity( 9506 Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags()); 9507 Value *IdenVec = 9508 State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden); 9509 Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec); 9510 NewVecOp = Select; 9511 } 9512 Value *NewRed; 9513 Value *NextInChain; 9514 if (IsOrdered) { 9515 if (State.VF.isVector()) 9516 NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp, 9517 PrevInChain); 9518 else 9519 NewRed = State.Builder.CreateBinOp( 9520 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain, 9521 NewVecOp); 9522 PrevInChain = NewRed; 9523 } else { 9524 PrevInChain = State.get(getChainOp(), Part); 9525 NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp); 9526 } 9527 if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) { 9528 NextInChain = 9529 createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(), 9530 NewRed, PrevInChain); 9531 } else if (IsOrdered) 9532 NextInChain = NewRed; 9533 else 9534 NextInChain = State.Builder.CreateBinOp( 9535 (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed, 9536 PrevInChain); 9537 State.set(this, NextInChain, Part); 9538 } 9539 } 9540 9541 void VPReplicateRecipe::execute(VPTransformState &State) { 9542 if (State.Instance) { // Generate a single instance. 9543 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9544 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance, 9545 IsPredicated, State); 9546 // Insert scalar instance packing it into a vector. 9547 if (AlsoPack && State.VF.isVector()) { 9548 // If we're constructing lane 0, initialize to start from poison. 
9549 if (State.Instance->Lane.isFirstLane()) { 9550 assert(!State.VF.isScalable() && "VF is assumed to be non scalable."); 9551 Value *Poison = PoisonValue::get( 9552 VectorType::get(getUnderlyingValue()->getType(), State.VF)); 9553 State.set(this, Poison, State.Instance->Part); 9554 } 9555 State.ILV->packScalarIntoVectorValue(this, *State.Instance, State); 9556 } 9557 return; 9558 } 9559 9560 if (IsUniform) { 9561 // Uniform within VL means we need to generate lane 0 only for each 9562 // unrolled copy. 9563 for (unsigned Part = 0; Part < State.UF; ++Part) 9564 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9565 VPIteration(Part, 0), IsPredicated, 9566 State); 9567 return; 9568 } 9569 9570 // Generate scalar instances for all VF lanes of all UF parts. 9571 assert(!State.VF.isScalable() && "Can't scalarize a scalable vector"); 9572 const unsigned EndLane = State.VF.getKnownMinValue(); 9573 for (unsigned Part = 0; Part < State.UF; ++Part) 9574 for (unsigned Lane = 0; Lane < EndLane; ++Lane) 9575 State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, 9576 VPIteration(Part, Lane), IsPredicated, 9577 State); 9578 } 9579 9580 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { 9581 VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; 9582 9583 // Attempt to issue a wide load. 9584 LoadInst *LI = dyn_cast<LoadInst>(&Ingredient); 9585 StoreInst *SI = dyn_cast<StoreInst>(&Ingredient); 9586 9587 assert((LI || SI) && "Invalid Load/Store instruction"); 9588 assert((!SI || StoredValue) && "No stored value provided for widened store"); 9589 assert((!LI || !StoredValue) && "Stored value provided for widened load"); 9590 9591 Type *ScalarDataTy = getLoadStoreType(&Ingredient); 9592 9593 auto *DataTy = VectorType::get(ScalarDataTy, State.VF); 9594 const Align Alignment = getLoadStoreAlignment(&Ingredient); 9595 bool CreateGatherScatter = !Consecutive; 9596 9597 auto &Builder = State.Builder; 9598 InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF); 9599 bool isMaskRequired = getMask(); 9600 if (isMaskRequired) 9601 for (unsigned Part = 0; Part < State.UF; ++Part) 9602 BlockInMaskParts[Part] = State.get(getMask(), Part); 9603 9604 const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * { 9605 // Calculate the pointer for the specific unroll-part. 9606 GetElementPtrInst *PartPtr = nullptr; 9607 9608 bool InBounds = false; 9609 if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts())) 9610 InBounds = gep->isInBounds(); 9611 if (Reverse) { 9612 // If the address is consecutive but reversed, then the 9613 // wide store needs to start at the last vector element. 9614 // RunTimeVF = VScale * VF.getKnownMinValue() 9615 // For fixed-width VScale is 1, then RunTimeVF = VF.getKnownMinValue() 9616 Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF); 9617 // NumElt = -Part * RunTimeVF 9618 Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF); 9619 // LastLane = 1 - RunTimeVF 9620 Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF); 9621 PartPtr = 9622 cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt)); 9623 PartPtr->setIsInBounds(InBounds); 9624 PartPtr = cast<GetElementPtrInst>( 9625 Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane)); 9626 PartPtr->setIsInBounds(InBounds); 9627 if (isMaskRequired) // Reverse of a null all-one mask is a null mask. 
        BlockInMaskParts[Part] =
            Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
    } else {
      Value *Increment =
          createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
      PartPtr = cast<GetElementPtrInst>(
          Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
      PartPtr->setIsInBounds(InBounds);
    }

    unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
    return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
  };

  // Handle Stores:
  if (SI) {
    State.setDebugLocFromInst(SI);

    for (unsigned Part = 0; Part < State.UF; ++Part) {
      Instruction *NewSI = nullptr;
      Value *StoredVal = State.get(StoredValue, Part);
      if (CreateGatherScatter) {
        Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
        Value *VectorGep = State.get(getAddr(), Part);
        NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
                                            MaskPart);
      } else {
        if (Reverse) {
          // If we store to reverse consecutive memory locations, then we need
          // to reverse the order of elements in the stored value.
          StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
          // We don't want to update the value in the map as it might be used
          // in another expression. So don't call resetVectorValue(StoredVal).
        }
        auto *VecPtr =
            CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
        if (isMaskRequired)
          NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
                                            BlockInMaskParts[Part]);
        else
          NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
      }
      State.addMetadata(NewSI, SI);
    }
    return;
  }

  // Handle loads.
  assert(LI && "Must have a load instruction");
  State.setDebugLocFromInst(LI);
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *NewLI;
    if (CreateGatherScatter) {
      Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
      Value *VectorGep = State.get(getAddr(), Part);
      NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
                                         nullptr, "wide.masked.gather");
      State.addMetadata(NewLI, LI);
    } else {
      auto *VecPtr =
          CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
      if (isMaskRequired)
        NewLI = Builder.CreateMaskedLoad(
            DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
            PoisonValue::get(DataTy), "wide.masked.load");
      else
        NewLI =
            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");

      // Add metadata to the load, but setVectorValue to the reverse shuffle.
      State.addMetadata(NewLI, LI);
      if (Reverse)
        NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
    }

    State.set(getVPSingleValue(), NewLI, Part);
  }
}

// Determine how to lower the scalar epilogue, which depends on 1) optimising
// for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
// predication, and 4) a TTI hook that analyses whether the loop is suitable
// for predication.
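// The result is one of the CM_ScalarEpilogue* values consumed by the cost
// model: keep the scalar epilogue, disallow it (e.g. when optimising for
// size), or fold the tail by masking instead of emitting an epilogue.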
static ScalarEpilogueLowering getScalarEpilogueLowering(
    Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
    AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
    LoopVectorizationLegality &LVL) {
  // 1) OptSize takes precedence over all other options, i.e. if this is set,
  // don't look at hints or options, and don't request a scalar epilogue.
  // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
  // LoopAccessInfo (due to code dependency and not being able to reliably get
  // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
  // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
  // versioning when the vectorization is forced, unlike hasOptSize. So revert
  // back to the old way and vectorize with versioning when forced. See D81345.)
  if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
                                                      PGSOQueryType::IRPass) &&
                          Hints.getForce() != LoopVectorizeHints::FK_Enabled))
    return CM_ScalarEpilogueNotAllowedOptSize;

  // 2) If set, obey the directives
  if (PreferPredicateOverEpilogue.getNumOccurrences()) {
    switch (PreferPredicateOverEpilogue) {
    case PreferPredicateTy::ScalarEpilogue:
      return CM_ScalarEpilogueAllowed;
    case PreferPredicateTy::PredicateElseScalarEpilogue:
      return CM_ScalarEpilogueNotNeededUsePredicate;
    case PreferPredicateTy::PredicateOrDontVectorize:
      return CM_ScalarEpilogueNotAllowedUsePredicate;
    };
  }

  // 3) If set, obey the hints
  switch (Hints.getPredicate()) {
  case LoopVectorizeHints::FK_Enabled:
    return CM_ScalarEpilogueNotNeededUsePredicate;
  case LoopVectorizeHints::FK_Disabled:
    return CM_ScalarEpilogueAllowed;
  };

  // 4) if the TTI hook indicates this is profitable, request predication.
  if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT, &LVL))
    return CM_ScalarEpilogueNotNeededUsePredicate;

  return CM_ScalarEpilogueAllowed;
}

Value *VPTransformState::get(VPValue *Def, unsigned Part) {
  // If Values have been set for this Def return the one relevant for \p Part.
  if (hasVectorValue(Def, Part))
    return Data.PerPartOutput[Def][Part];

  if (!hasScalarValue(Def, {Part, 0})) {
    Value *IRV = Def->getLiveInIRValue();
    Value *B = ILV->getBroadcastInstrs(IRV);
    set(Def, B, Part);
    return B;
  }

  Value *ScalarValue = get(Def, {Part, 0});
  // If we aren't vectorizing, we can just copy the scalar map values over
  // to the vector map.
  if (VF.isScalar()) {
    set(Def, ScalarValue, Part);
    return ScalarValue;
  }

  auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
  bool IsUniform = RepR && RepR->isUniform();

  unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
  // Check if there is a scalar value for the selected lane.
  if (!hasScalarValue(Def, {Part, LastLane})) {
    // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
    assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) ||
            isa<VPScalarIVStepsRecipe>(Def->getDef())) &&
           "unexpected recipe found to be invariant");
    IsUniform = true;
    LastLane = 0;
  }

  auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
  // Set the insert point after the last scalarized instruction or after the
  // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
  // will directly follow the scalar definitions.
  auto OldIP = Builder.saveIP();
  auto NewIP =
      isa<PHINode>(LastInst)
          ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
          : std::next(BasicBlock::iterator(LastInst));
  Builder.SetInsertPoint(&*NewIP);

  // However, if we are vectorizing, we need to construct the vector values.
  // If the value is known to be uniform after vectorization, we can just
  // broadcast the scalar value corresponding to lane zero for each unroll
  // iteration. Otherwise, we construct the vector values using
  // insertelement instructions. Since the resulting vectors are stored in
  // State, we will only generate the insertelements once.
  Value *VectorValue = nullptr;
  if (IsUniform) {
    VectorValue = ILV->getBroadcastInstrs(ScalarValue);
    set(Def, VectorValue, Part);
  } else {
    // Initialize packing with insertelements to start from poison.
    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
    Value *Poison = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
    set(Def, Poison, Part);
    for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
      ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
    VectorValue = get(Def, Part);
  }
  Builder.restoreIP(OldIP);
  return VectorValue;
}

// Process the loop in the VPlan-native vectorization path. This path builds
// VPlan upfront in the vectorization pipeline, which allows applying
// VPlan-to-VPlan transformations from the very beginning without modifying the
// input LLVM IR.
static bool processLoopInVPlanNativePath(
    Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
    LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
    TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
    ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
    LoopVectorizationRequirements &Requirements) {

  if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
    LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
    return false;
  }
  assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
  Function *F = L->getHeader()->getParent();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());

  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);

  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
                                &Hints, IAI);
  // Use the planner for outer loop vectorization.
  // TODO: CM is not used at this point inside the planner. Turn CM into an
  // optional argument if we don't need it in the future.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints, ORE);

  // Get user vectorization factor.
  ElementCount UserVF = Hints.getWidth();

  CM.collectElementTypesForWidening();

  // Plan how to best vectorize, return the best VF and its cost.
  const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);

  // If we are stress testing VPlan builds, do not attempt to generate vector
  // code. Masked vector code generation support will follow soon.
  // Also, do not attempt to vectorize if no vector code will be produced.
  if (VPlanBuildStressTest || VectorizationFactor::Disabled() == VF)
    return false;

  VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);

  {
    GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
                             F->getParent()->getDataLayout());
    InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width,
                           VF.Width, 1, LVL, &CM, BFI, PSI, Checks);
    LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
                      << L->getHeader()->getParent()->getName() << "\"\n");
    LVP.executePlan(VF.Width, 1, BestPlan, LB, DT, false);
  }

  // Mark the loop as already vectorized to avoid vectorizing again.
  Hints.setAlreadyVectorized();
  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

// Emit a remark if there are stores to floats that required a floating point
// extension. If the vectorized loop was generated with a wider floating point
// type, there will be a performance penalty from the conversion overhead and
// the change in the vector width.
static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
  SmallVector<Instruction *, 4> Worklist;
  for (BasicBlock *BB : L->getBlocks()) {
    for (Instruction &Inst : *BB) {
      if (auto *S = dyn_cast<StoreInst>(&Inst)) {
        if (S->getValueOperand()->getType()->isFloatTy())
          Worklist.push_back(S);
      }
    }
  }

  // Traverse the floating point stores upwards, searching for floating point
  // conversions.
  SmallPtrSet<const Instruction *, 4> Visited;
  SmallPtrSet<const Instruction *, 4> EmittedRemark;
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!L->contains(I))
      continue;
    if (!Visited.insert(I).second)
      continue;

    // Emit a remark if the floating point store required a floating
    // point conversion.
    // TODO: More work could be done to identify the root cause such as a
    // constant or a function return type and point the user to it.
    if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
      ORE->emit([&]() {
        return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
                                          I->getDebugLoc(), L->getHeader())
               << "floating point conversion changes vector width. "
               << "Mixed floating point precision requires an up/down "
               << "cast that will negatively impact performance.";
      });

    for (Use &Op : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(Op))
        Worklist.push_back(OpI);
  }
}

static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
                                       VectorizationFactor &VF,
                                       Optional<unsigned> VScale, Loop *L,
                                       ScalarEvolution &SE) {
  InstructionCost CheckCost = Checks.getCost();
  if (!CheckCost.isValid())
    return false;

  // When interleaving only, scalar and vector cost will be equal, which in
  // turn would lead to a divide by 0. Fall back to hard threshold.
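  // (In that case VectorizeMemoryCheckThreshold below serves as the upper
  // bound on the admissible check cost.)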
  if (VF.Width.isScalar()) {
    if (CheckCost > VectorizeMemoryCheckThreshold) {
      LLVM_DEBUG(
          dbgs()
          << "LV: Interleaving only is not profitable due to runtime checks\n");
      return false;
    }
    return true;
  }

  // The scalar cost should only be 0 when vectorizing with a user specified
  // VF/IC. In those cases, runtime checks should always be generated.
  double ScalarC = *VF.ScalarCost.getValue();
  if (ScalarC == 0)
    return true;

  // First, compute the minimum iteration count required so that the vector
  // loop outperforms the scalar loop.
  //  The total cost of the scalar loop is
  //   ScalarC * TC
  //  where
  //  * TC is the actual trip count of the loop.
  //  * ScalarC is the cost of a single scalar iteration.
  //
  //  The total cost of the vector loop is
  //    RtC + VecC * (TC / VF) + EpiC
  //  where
  //  * RtC is the cost of the generated runtime checks
  //  * VecC is the cost of a single vector iteration.
  //  * TC is the actual trip count of the loop
  //  * VF is the vectorization factor
  //  * EpiC is the cost of the generated epilogue, including the cost
  //    of the remaining scalar operations.
  //
  // Vectorization is profitable once the total vector cost is less than the
  // total scalar cost:
  //   RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
  //
  // Now we can compute the minimum required trip count TC as
  //   (RtC + EpiC) / (ScalarC - (VecC / VF)) < TC
  //
  // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that
  // the computations are performed on doubles, not integers and the result
  // is rounded up, hence we get an upper estimate of the TC.
  unsigned IntVF = VF.Width.getKnownMinValue();
  if (VF.Width.isScalable()) {
    unsigned AssumedMinimumVscale = 1;
    if (VScale)
      AssumedMinimumVscale = *VScale;
    IntVF *= AssumedMinimumVscale;
  }
  double VecCOverVF = double(*VF.Cost.getValue()) / IntVF;
  double RtC = *CheckCost.getValue();
  double MinTC1 = RtC / (ScalarC - VecCOverVF);

  // Second, compute a minimum iteration count so that the cost of the
  // runtime checks is only a fraction of the total scalar loop cost. This
  // adds a loop-dependent bound on the overhead incurred if the runtime
  // checks fail. In case the runtime checks fail, the cost is RtC + ScalarC
  // * TC. To bound the runtime check to be a fraction 1/X of the scalar
  // cost, compute
  //   RtC < ScalarC * TC * (1 / X)  ==>  RtC * X / ScalarC < TC
  double MinTC2 = RtC * 10 / ScalarC;

  // Now pick the larger minimum. If it is not a multiple of VF, choose the
  // next closest multiple of VF. This should partly compensate for ignoring
  // the epilogue cost.
  uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2));
  VF.MinProfitableTripCount = ElementCount::getFixed(alignTo(MinTC, IntVF));

  LLVM_DEBUG(
      dbgs() << "LV: Minimum required TC for runtime checks to be profitable:"
             << VF.MinProfitableTripCount << "\n");

  // Skip vectorization if the expected trip count is less than the minimum
  // required trip count.
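  // For illustration, with hypothetical costs ScalarC = 4, VecC = 16, RtC = 40
  // and VF = 8: MinTC1 = 40 / (4 - 16/8) = 20 and MinTC2 = 40 * 10 / 4 = 100,
  // so the checks only pay off for an expected trip count of at least
  // alignTo(100, 8) = 104 iterations.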
  if (auto ExpectedTC = getSmallBestKnownTC(SE, L)) {
    if (ElementCount::isKnownLT(ElementCount::getFixed(*ExpectedTC),
                                VF.MinProfitableTripCount)) {
      LLVM_DEBUG(dbgs() << "LV: Vectorization is not beneficial: expected "
                           "trip count < minimum profitable VF ("
                        << *ExpectedTC << " < " << VF.MinProfitableTripCount
                        << ")\n");

      return false;
    }
  }
  return true;
}

LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
    : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                               !EnableLoopInterleaving),
      VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
                              !EnableLoopVectorization) {}

bool LoopVectorizePass::processLoop(Loop *L) {
  assert((EnableVPlanNativePath || L->isInnermost()) &&
         "VPlan-native path is not enabled. Only process inner loops.");

#ifndef NDEBUG
  const std::string DebugLocStr = getDebugLocString(L);
#endif /* NDEBUG */

  LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in '"
                    << L->getHeader()->getParent()->getName() << "' from "
                    << DebugLocStr << "\n");

  LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE, TTI);

  LLVM_DEBUG(
      dbgs() << "LV: Loop hints:"
             << " force="
             << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
                     ? "disabled"
                     : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
                            ? "enabled"
                            : "?"))
             << " width=" << Hints.getWidth()
             << " interleave=" << Hints.getInterleave() << "\n");

  // Function containing loop
  Function *F = L->getHeader()->getParent();

  // Looking at the diagnostic output is the only way to determine if a loop
  // was vectorized (other than looking at the IR or machine code), so it
  // is important to generate an optimization remark for each loop. Most of
  // these messages are generated as OptimizationRemarkAnalysis. Remarks
  // generated as OptimizationRemark and OptimizationRemarkMissed are
  // less verbose reporting vectorized loops and unvectorized loops that may
  // benefit from vectorization, respectively.

  if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
    LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
    return false;
  }

  PredicatedScalarEvolution PSE(*SE, *L);

  // Check if it is legal to vectorize the loop.
  LoopVectorizationRequirements Requirements;
  LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
                                &Requirements, &Hints, DB, AC, BFI, PSI);
  if (!LVL.canVectorize(EnableVPlanNativePath)) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check the function attributes and profiles to find out if this function
  // should be optimized for size.
  ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
      F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);

  // Entrance to the VPlan-native vectorization path. Outer loops are processed
  // here. They may require CFG and instruction level transformations before
  // even evaluating whether vectorization is profitable. Since we cannot modify
  // the incoming IR, we need to build VPlan upfront in the vectorization
  // pipeline.
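  // Inner loops fall through to the cost-model-driven path below.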
  if (!L->isInnermost())
    return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
                                        ORE, BFI, PSI, Hints, Requirements);

  assert(L->isInnermost() && "Inner loop expected.");

  // Check the loop for a trip count threshold: vectorize loops with a tiny trip
  // count by optimizing for size, to minimize overheads.
  auto ExpectedTC = getSmallBestKnownTC(*SE, L);
  if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
    LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
                      << "This loop is worth vectorizing only if no scalar "
                      << "iteration overheads are incurred.");
    if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
      LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
    else {
      LLVM_DEBUG(dbgs() << "\n");
      SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
    }
  }

  // Check the function attributes to see if implicit floats are allowed.
  // FIXME: This check doesn't seem possibly correct -- what if the loop is
  // an integer loop and the vector instructions selected are purely integer
  // vector instructions?
  if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    reportVectorizationFailure(
        "Can't vectorize when the NoImplicitFloat attribute is used",
        "loop not vectorized due to NoImplicitFloat attribute",
        "NoImplicitFloat", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  // Check if the target supports potentially unsafe FP vectorization.
  // FIXME: Add a check for the type of safety issue (denormal, signaling)
  // for the target we're vectorizing for, to make sure none of the
  // additional fp-math flags can help.
  if (Hints.isPotentiallyUnsafe() &&
      TTI->isFPVectorizationPotentiallyUnsafe()) {
    reportVectorizationFailure(
        "Potentially unsafe FP op prevents vectorization",
        "loop not vectorized due to unsafe FP support.",
        "UnsafeFP", ORE, L);
    Hints.emitRemarkWithHints();
    return false;
  }

  bool AllowOrderedReductions;
  // If the flag is set, use that instead and override the TTI behaviour.
  if (ForceOrderedReductions.getNumOccurrences() > 0)
    AllowOrderedReductions = ForceOrderedReductions;
  else
    AllowOrderedReductions = TTI->enableOrderedReductions();
  if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
    ORE->emit([&]() {
      auto *ExactFPMathInst = Requirements.getExactFPInst();
      return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
                                                 ExactFPMathInst->getDebugLoc(),
                                                 ExactFPMathInst->getParent())
             << "loop not vectorized: cannot prove it is safe to reorder "
                "floating-point operations";
    });
    LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
                         "reorder floating-point operations\n");
    Hints.emitRemarkWithHints();
    return false;
  }

  bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
  InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());

  // If an override option has been passed in for interleaved accesses, use it.
  if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
    UseInterleaved = EnableInterleavedMemAccesses;

  // Analyze interleaved memory accesses.
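  // The interleave groups recorded in IAI are used both by the cost model and
  // when building VPlan recipes for the selected plan.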
  if (UseInterleaved) {
    IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
  }

  // Use the cost model.
  LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
                                F, &Hints, IAI);
  CM.collectValuesToIgnore();
  CM.collectElementTypesForWidening();

  // Use the planner for vectorization.
  LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints, ORE);

  // Get user vectorization factor and interleave count.
  ElementCount UserVF = Hints.getWidth();
  unsigned UserIC = Hints.getInterleave();

  // Plan how to best vectorize, return the best VF and its cost.
  Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);

  VectorizationFactor VF = VectorizationFactor::Disabled();
  unsigned IC = 1;

  GeneratedRTChecks Checks(*PSE.getSE(), DT, LI, TTI,
                           F->getParent()->getDataLayout());
  if (MaybeVF) {
    VF = *MaybeVF;
    // Select the interleave count.
    IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());

    unsigned SelectedIC = std::max(IC, UserIC);
    // Optimistically generate runtime checks if they are needed. Drop them if
    // they turn out to not be profitable.
    if (VF.Width.isVector() || SelectedIC > 1)
      Checks.Create(L, *LVL.getLAI(), PSE.getPredicate(), VF.Width, SelectedIC);

    // Check if it is profitable to vectorize with runtime checks.
    bool ForceVectorization =
        Hints.getForce() == LoopVectorizeHints::FK_Enabled;
    if (!ForceVectorization &&
        !areRuntimeChecksProfitable(Checks, VF, CM.getVScaleForTuning(), L,
                                    *PSE.getSE())) {
      ORE->emit([&]() {
        return OptimizationRemarkAnalysisAliasing(
                   DEBUG_TYPE, "CantReorderMemOps", L->getStartLoc(),
                   L->getHeader())
               << "loop not vectorized: cannot prove it is safe to reorder "
                  "memory operations";
      });
      LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
      Hints.emitRemarkWithHints();
      return false;
    }
  }

  // Identify the diagnostic messages that should be produced.
  std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
  bool VectorizeLoop = true, InterleaveLoop = true;
  if (VF.Width.isScalar()) {
    LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
    VecDiagMsg = std::make_pair(
        "VectorizationNotBeneficial",
        "the cost-model indicates that vectorization is not beneficial");
    VectorizeLoop = false;
  }

  if (!MaybeVF && UserIC > 1) {
    // Tell the user interleaving was avoided up-front, despite being explicitly
    // requested.
    LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
                         "interleaving should be avoided up front\n");
    IntDiagMsg = std::make_pair(
        "InterleavingAvoided",
        "Ignoring UserIC, because interleaving was avoided up front");
    InterleaveLoop = false;
  } else if (IC == 1 && UserIC <= 1) {
    // Tell the user interleaving is not beneficial.
    LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
    IntDiagMsg = std::make_pair(
        "InterleavingNotBeneficial",
        "the cost-model indicates that interleaving is not beneficial");
    InterleaveLoop = false;
    if (UserIC == 1) {
      IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
      IntDiagMsg.second +=
          " and is explicitly disabled or interleave count is set to 1";
    }
  } else if (IC > 1 && UserIC == 1) {
    // Tell the user interleaving is beneficial, but it is explicitly disabled.
    LLVM_DEBUG(
        dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
    IntDiagMsg = std::make_pair(
        "InterleavingBeneficialButDisabled",
        "the cost-model indicates that interleaving is beneficial "
        "but is explicitly disabled or interleave count is set to 1");
    InterleaveLoop = false;
  }

  // Override IC if user provided an interleave count.
  IC = UserIC > 0 ? UserIC : IC;

  // Emit diagnostic messages, if any.
  const char *VAPassName = Hints.vectorizeAnalysisPassName();
  if (!VectorizeLoop && !InterleaveLoop) {
    // Do not vectorize or interleave the loop.
    ORE->emit([&]() {
      return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
    ORE->emit([&]() {
      return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
                                      L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
    return false;
  } else if (!VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << VecDiagMsg.second;
    });
  } else if (VectorizeLoop && !InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
                                        L->getStartLoc(), L->getHeader())
             << IntDiagMsg.second;
    });
  } else if (VectorizeLoop && InterleaveLoop) {
    LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
                      << ") in " << DebugLocStr << '\n');
    LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
  }

  bool DisableRuntimeUnroll = false;
  MDNode *OrigLoopID = L->getLoopID();
  {
    using namespace ore;
    if (!VectorizeLoop) {
      assert(IC > 1 && "interleave count should not be 1 or 0");
      // If we decided that it is not profitable to vectorize the loop, then
      // interleave it.
      InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
                                 &CM, BFI, PSI, Checks);

      VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
      LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT, false);

      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
                                  L->getHeader())
               << "interleaved loop (interleaved count: "
               << NV("InterleaveCount", IC) << ")";
      });
    } else {
      // If we decided that it is *profitable* to vectorize the loop, then do it.

      // Consider vectorizing the epilogue too if it's profitable.
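      // If an epilogue VF is selected, the main loop is vectorized first and
      // the remaining scalar iterations are vectorized below with the narrower
      // factor; otherwise a single vector loop is emitted.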
      VectorizationFactor EpilogueVF =
          CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
      if (EpilogueVF.Width.isVector()) {

        // The first pass vectorizes the main loop and creates a scalar epilogue
        // to be vectorized by executing the plan (potentially with a different
        // factor) again shortly afterwards.
        EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
        EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
                                           EPI, &LVL, &CM, BFI, PSI, Checks);

        VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
        LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
                        DT, true);
        ++LoopsVectorized;

        // Second pass vectorizes the epilogue and adjusts the control flow
        // edges from the first pass.
        EPI.MainLoopVF = EPI.EpilogueVF;
        EPI.MainLoopUF = EPI.EpilogueUF;
        EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
                                                 ORE, EPI, &LVL, &CM, BFI, PSI,
                                                 Checks);

        VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
        VPRegionBlock *VectorLoop = BestEpiPlan.getVectorLoopRegion();
        VPBasicBlock *Header = VectorLoop->getEntryBasicBlock();
        Header->setName("vec.epilog.vector.body");

        // Ensure that the start values for any VPReductionPHIRecipes are
        // updated before vectorising the epilogue loop.
        for (VPRecipeBase &R : Header->phis()) {
          if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R)) {
            if (auto *Resume = MainILV.getReductionResumeValue(
                    ReductionPhi->getRecurrenceDescriptor())) {
              VPValue *StartVal = BestEpiPlan.getOrAddExternalDef(Resume);
              ReductionPhi->setOperand(0, StartVal);
            }
          }
        }

        LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
                        DT, true);
        ++LoopsEpilogueVectorized;

        if (!MainILV.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      } else {
        InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width,
                               VF.MinProfitableTripCount, IC, &LVL, &CM, BFI,
                               PSI, Checks);

        VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
        LVP.executePlan(VF.Width, IC, BestPlan, LB, DT, false);
        ++LoopsVectorized;

        // Add metadata to disable runtime unrolling of the scalar loop when
        // there are no runtime checks about strides and memory. A scalar loop
        // that is rarely used is not worth unrolling.
        if (!LB.areSafetyChecksAdded())
          DisableRuntimeUnroll = true;
      }
      // Report the vectorization decision.
      ORE->emit([&]() {
        return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
                                  L->getHeader())
               << "vectorized loop (vectorization width: "
               << NV("VectorizationFactor", VF.Width)
               << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
      });
    }

    if (ORE->allowExtraAnalysis(LV_NAME))
      checkMixedPrecision(L, ORE);
  }

  Optional<MDNode *> RemainderLoopID =
      makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
                                      LLVMLoopVectorizeFollowupEpilogue});
  if (RemainderLoopID) {
    L->setLoopID(RemainderLoopID.value());
  } else {
    if (DisableRuntimeUnroll)
      AddRuntimeUnrollDisableMetaData(L);

    // Mark the loop as already vectorized to avoid vectorizing again.
    Hints.setAlreadyVectorized();
  }

  assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
  return true;
}

LoopVectorizeResult LoopVectorizePass::runImpl(
    Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
    DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
    DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
    std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
    OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
  SE = &SE_;
  LI = &LI_;
  TTI = &TTI_;
  DT = &DT_;
  BFI = &BFI_;
  TLI = TLI_;
  AA = &AA_;
  AC = &AC_;
  GetLAA = &GetLAA_;
  DB = &DB_;
  ORE = &ORE_;
  PSI = PSI_;

  // Don't attempt if
  // 1. the target claims to have no vector registers, and
  // 2. interleaving won't help ILP.
  //
  // The second condition is necessary because, even if the target has no
  // vector registers, loop vectorization may still enable scalar
  // interleaving.
  if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
      TTI->getMaxInterleaveFactor(1) < 2)
    return LoopVectorizeResult(false, false);

  bool Changed = false, CFGChanged = false;

  // The vectorizer requires loops to be in simplified form.
  // Since simplification may add new inner loops, it has to run before the
  // legality and profitability checks. This means running the loop vectorizer
  // will simplify all loops, regardless of whether anything ends up being
  // vectorized.
  for (auto &L : *LI)
    Changed |= CFGChanged |=
        simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);

  // Build up a worklist of inner-loops to vectorize. This is necessary as
  // the act of vectorizing or partially unrolling a loop creates new loops
  // and can invalidate iterators across the loops.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *L : *LI)
    collectSupportedLoops(*L, LI, ORE, Worklist);

  LoopsAnalyzed += Worklist.size();

  // Now walk the identified inner loops.
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();

    // For the inner loops we actually process, form LCSSA to simplify the
    // transform.
    Changed |= formLCSSARecursively(*L, *DT, LI, SE);

    Changed |= CFGChanged |= processLoop(L);
  }

  // Process each loop nest in the function.
  return LoopVectorizeResult(Changed, CFGChanged);
}

PreservedAnalyses LoopVectorizePass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other expensive
  // analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  std::function<const LoopAccessInfo &(Loop &)> GetLAA =
      [&](Loop &L) -> const LoopAccessInfo & {
    LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                      TLI, TTI, nullptr, nullptr, nullptr};
    return LAM.getResult<LoopAccessAnalysis>(L, AR);
  };
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  ProfileSummaryInfo *PSI =
      MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  LoopVectorizeResult Result =
      runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
  if (!Result.MadeAnyChange)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;

  // We currently do not preserve loopinfo/dominator analyses with outer loop
  // vectorization. Until this is addressed, mark these analyses as preserved
  // only for non-VPlan-native path.
  // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
  if (!EnableVPlanNativePath) {
    PA.preserve<LoopAnalysis>();
    PA.preserve<DominatorTreeAnalysis>();
  }

  if (Result.MadeCFGChange) {
    // Making CFG changes likely means a loop got vectorized. Indicate that
    // extra simplification passes should be run.
    // TODO: MadeCFGChange is not a perfect proxy. Extra passes should only
    // be run if runtime checks have been added.
    AM.getResult<ShouldRunExtraVectorPasses>(F);
    PA.preserve<ShouldRunExtraVectorPasses>();
  } else {
    PA.preserveSet<CFGAnalyses>();
  }
  return PA;
}

void LoopVectorizePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
  OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
  OS << ">";
}