xref: /freebsd-src/contrib/llvm-project/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (revision 4824e7fd18a1223177218d4aec1b3c6c5c4a444e)
1 //===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
10 // and generates target-independent LLVM-IR.
11 // The vectorizer uses the TargetTransformInfo analysis to estimate the costs
12 // of instructions in order to estimate the profitability of vectorization.
13 //
14 // The loop vectorizer combines consecutive loop iterations into a single
15 // 'wide' iteration. After this transformation the index is incremented
16 // by the SIMD vector width, and not by one.
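// As an illustrative sketch (not tied to any particular target), with a
// vectorization factor of 4 a loop such as
//
//   for (i = 0; i < n; ++i)
//     a[i] = b[i] + 1;
//
// becomes a loop whose body operates on <4 x ...> values covering a[i..i+3]
// and b[i..i+3] and whose index advances by 4 per iteration; the iterations
// left over when n is not a multiple of 4 run in a scalar epilogue loop.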
17 //
18 // This pass has four parts:
19 // 1. The main loop pass that drives the different parts.
20 // 2. LoopVectorizationLegality - A unit that checks for the legality
21 //    of the vectorization.
22 // 3. InnerLoopVectorizer - A unit that performs the actual
23 //    widening of instructions.
24 // 4. LoopVectorizationCostModel - A unit that checks for the profitability
25 //    of vectorization. It decides on the optimal vector width, which
26 //    can be one, if vectorization is not profitable.
27 //
28 // There is a development effort going on to migrate the loop vectorizer to the
29 // VPlan infrastructure and to introduce outer loop vectorization support (see
30 // docs/Proposal/VectorizationPlan.rst and
31 // http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
32 // purpose, we temporarily introduced the VPlan-native vectorization path: an
33 // alternative vectorization path that is natively implemented on top of the
34 // VPlan infrastructure. See EnableVPlanNativePath for enabling.
35 //
36 //===----------------------------------------------------------------------===//
37 //
38 // The reduction-variable vectorization is based on the paper:
39 //  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
40 //
41 // Variable uniformity checks are inspired by:
42 //  Karrenberg, R. and Hack, S. Whole Function Vectorization.
43 //
44 // The interleaved access vectorization is based on the paper:
45 //  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
46 //  Data for SIMD
47 //
48 // Other ideas/concepts are from:
49 //  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
50 //
51 //  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
52 //  Vectorizing Compilers.
53 //
54 //===----------------------------------------------------------------------===//
55 
56 #include "llvm/Transforms/Vectorize/LoopVectorize.h"
57 #include "LoopVectorizationPlanner.h"
58 #include "VPRecipeBuilder.h"
59 #include "VPlan.h"
60 #include "VPlanHCFGBuilder.h"
61 #include "VPlanPredicator.h"
62 #include "VPlanTransforms.h"
63 #include "llvm/ADT/APInt.h"
64 #include "llvm/ADT/ArrayRef.h"
65 #include "llvm/ADT/DenseMap.h"
66 #include "llvm/ADT/DenseMapInfo.h"
67 #include "llvm/ADT/Hashing.h"
68 #include "llvm/ADT/MapVector.h"
69 #include "llvm/ADT/None.h"
70 #include "llvm/ADT/Optional.h"
71 #include "llvm/ADT/STLExtras.h"
72 #include "llvm/ADT/SmallPtrSet.h"
73 #include "llvm/ADT/SmallSet.h"
74 #include "llvm/ADT/SmallVector.h"
75 #include "llvm/ADT/Statistic.h"
76 #include "llvm/ADT/StringRef.h"
77 #include "llvm/ADT/Twine.h"
78 #include "llvm/ADT/iterator_range.h"
79 #include "llvm/Analysis/AssumptionCache.h"
80 #include "llvm/Analysis/BasicAliasAnalysis.h"
81 #include "llvm/Analysis/BlockFrequencyInfo.h"
82 #include "llvm/Analysis/CFG.h"
83 #include "llvm/Analysis/CodeMetrics.h"
84 #include "llvm/Analysis/DemandedBits.h"
85 #include "llvm/Analysis/GlobalsModRef.h"
86 #include "llvm/Analysis/LoopAccessAnalysis.h"
87 #include "llvm/Analysis/LoopAnalysisManager.h"
88 #include "llvm/Analysis/LoopInfo.h"
89 #include "llvm/Analysis/LoopIterator.h"
90 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
91 #include "llvm/Analysis/ProfileSummaryInfo.h"
92 #include "llvm/Analysis/ScalarEvolution.h"
93 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
94 #include "llvm/Analysis/TargetLibraryInfo.h"
95 #include "llvm/Analysis/TargetTransformInfo.h"
96 #include "llvm/Analysis/VectorUtils.h"
97 #include "llvm/IR/Attributes.h"
98 #include "llvm/IR/BasicBlock.h"
99 #include "llvm/IR/CFG.h"
100 #include "llvm/IR/Constant.h"
101 #include "llvm/IR/Constants.h"
102 #include "llvm/IR/DataLayout.h"
103 #include "llvm/IR/DebugInfoMetadata.h"
104 #include "llvm/IR/DebugLoc.h"
105 #include "llvm/IR/DerivedTypes.h"
106 #include "llvm/IR/DiagnosticInfo.h"
107 #include "llvm/IR/Dominators.h"
108 #include "llvm/IR/Function.h"
109 #include "llvm/IR/IRBuilder.h"
110 #include "llvm/IR/InstrTypes.h"
111 #include "llvm/IR/Instruction.h"
112 #include "llvm/IR/Instructions.h"
113 #include "llvm/IR/IntrinsicInst.h"
114 #include "llvm/IR/Intrinsics.h"
115 #include "llvm/IR/LLVMContext.h"
116 #include "llvm/IR/Metadata.h"
117 #include "llvm/IR/Module.h"
118 #include "llvm/IR/Operator.h"
119 #include "llvm/IR/PatternMatch.h"
120 #include "llvm/IR/Type.h"
121 #include "llvm/IR/Use.h"
122 #include "llvm/IR/User.h"
123 #include "llvm/IR/Value.h"
124 #include "llvm/IR/ValueHandle.h"
125 #include "llvm/IR/Verifier.h"
126 #include "llvm/InitializePasses.h"
127 #include "llvm/Pass.h"
128 #include "llvm/Support/Casting.h"
129 #include "llvm/Support/CommandLine.h"
130 #include "llvm/Support/Compiler.h"
131 #include "llvm/Support/Debug.h"
132 #include "llvm/Support/ErrorHandling.h"
133 #include "llvm/Support/InstructionCost.h"
134 #include "llvm/Support/MathExtras.h"
135 #include "llvm/Support/raw_ostream.h"
136 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
137 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
138 #include "llvm/Transforms/Utils/LoopSimplify.h"
139 #include "llvm/Transforms/Utils/LoopUtils.h"
140 #include "llvm/Transforms/Utils/LoopVersioning.h"
141 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
142 #include "llvm/Transforms/Utils/SizeOpts.h"
143 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
144 #include <algorithm>
145 #include <cassert>
146 #include <cstdint>
147 #include <cstdlib>
148 #include <functional>
149 #include <iterator>
150 #include <limits>
151 #include <memory>
152 #include <string>
153 #include <tuple>
154 #include <utility>
155 
156 using namespace llvm;
157 
158 #define LV_NAME "loop-vectorize"
159 #define DEBUG_TYPE LV_NAME
160 
161 #ifndef NDEBUG
162 const char VerboseDebug[] = DEBUG_TYPE "-verbose";
163 #endif
164 
165 /// @{
166 /// Metadata attribute names
167 const char LLVMLoopVectorizeFollowupAll[] = "llvm.loop.vectorize.followup_all";
168 const char LLVMLoopVectorizeFollowupVectorized[] =
169     "llvm.loop.vectorize.followup_vectorized";
170 const char LLVMLoopVectorizeFollowupEpilogue[] =
171     "llvm.loop.vectorize.followup_epilogue";
172 /// @}
173 
174 STATISTIC(LoopsVectorized, "Number of loops vectorized");
175 STATISTIC(LoopsAnalyzed, "Number of loops analyzed for vectorization");
176 STATISTIC(LoopsEpilogueVectorized, "Number of epilogues vectorized");
177 
178 static cl::opt<bool> EnableEpilogueVectorization(
179     "enable-epilogue-vectorization", cl::init(true), cl::Hidden,
180     cl::desc("Enable vectorization of epilogue loops."));
181 
182 static cl::opt<unsigned> EpilogueVectorizationForceVF(
183     "epilogue-vectorization-force-VF", cl::init(1), cl::Hidden,
184     cl::desc("When epilogue vectorization is enabled, and a value greater than "
185              "1 is specified, forces the given VF for all applicable epilogue "
186              "loops."));
187 
188 static cl::opt<unsigned> EpilogueVectorizationMinVF(
189     "epilogue-vectorization-minimum-VF", cl::init(16), cl::Hidden,
190     cl::desc("Only loops with vectorization factor equal to or larger than "
191              "the specified value are considered for epilogue vectorization."));
192 
193 /// Loops with a known constant trip count below this number are vectorized only
194 /// if no scalar iteration overheads are incurred.
195 static cl::opt<unsigned> TinyTripCountVectorThreshold(
196     "vectorizer-min-trip-count", cl::init(16), cl::Hidden,
197     cl::desc("Loops with a constant trip count that is smaller than this "
198              "value are vectorized only if no scalar iteration overheads "
199              "are incurred."));
200 
201 static cl::opt<unsigned> PragmaVectorizeMemoryCheckThreshold(
202     "pragma-vectorize-memory-check-threshold", cl::init(128), cl::Hidden,
203     cl::desc("The maximum allowed number of runtime memory checks with a "
204              "vectorize(enable) pragma."));
205 
206 // Option prefer-predicate-over-epilogue indicates that an epilogue is undesired,
207 // that predication is preferred, and lists the available options. I.e., the
208 // vectorizer will try to fold the tail loop (epilogue) into the vector body
209 // and predicate the instructions accordingly. If tail-folding fails, the
210 // fallback strategy depends on these values:
211 namespace PreferPredicateTy {
212   enum Option {
213     ScalarEpilogue = 0,
214     PredicateElseScalarEpilogue,
215     PredicateOrDontVectorize
216   };
217 } // namespace PreferPredicateTy
218 
219 static cl::opt<PreferPredicateTy::Option> PreferPredicateOverEpilogue(
220     "prefer-predicate-over-epilogue",
221     cl::init(PreferPredicateTy::ScalarEpilogue),
222     cl::Hidden,
223     cl::desc("Tail-folding and predication preferences over creating a scalar "
224              "epilogue loop."),
225     cl::values(clEnumValN(PreferPredicateTy::ScalarEpilogue,
226                          "scalar-epilogue",
227                          "Don't tail-predicate loops, create scalar epilogue"),
228               clEnumValN(PreferPredicateTy::PredicateElseScalarEpilogue,
229                          "predicate-else-scalar-epilogue",
230                          "Prefer tail-folding, create scalar epilogue if "
231                          "tail-folding fails."),
232               clEnumValN(PreferPredicateTy::PredicateOrDontVectorize,
233                          "predicate-dont-vectorize",
234                          "Prefer tail-folding, don't attempt vectorization if "
235                          "tail-folding fails.")));
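// As a hypothetical invocation (for illustration only), passing
//   -mllvm -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue
// to clang asks the vectorizer to tail-fold (predicate) loops where it can
// and to fall back to a scalar epilogue loop where tail-folding fails.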
236 
237 static cl::opt<bool> MaximizeBandwidth(
238     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
239     cl::desc("Maximize bandwidth when selecting vectorization factor, which "
240              "will be determined by the smallest type in the loop."));
241 
242 static cl::opt<bool> EnableInterleavedMemAccesses(
243     "enable-interleaved-mem-accesses", cl::init(false), cl::Hidden,
244     cl::desc("Enable vectorization on interleaved memory accesses in a loop"));
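// For example (an illustrative sketch), a loop that reads both a[2*i] and
// a[2*i+1] forms an interleave group with factor 2: rather than two strided
// accesses, the vectorizer can emit one wide load covering
// a[2*i .. 2*i + 2*VF - 1] followed by shuffles that de-interleave the even
// and odd elements.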
245 
246 /// An interleave-group may need masking if it resides in a block that needs
247 /// predication, or in order to mask away gaps.
248 static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
249     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
250     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
251 
252 static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
253     "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
254     cl::desc("We don't interleave loops with an estimated constant trip count "
255              "below this number"));
256 
257 static cl::opt<unsigned> ForceTargetNumScalarRegs(
258     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
259     cl::desc("A flag that overrides the target's number of scalar registers."));
260 
261 static cl::opt<unsigned> ForceTargetNumVectorRegs(
262     "force-target-num-vector-regs", cl::init(0), cl::Hidden,
263     cl::desc("A flag that overrides the target's number of vector registers."));
264 
265 static cl::opt<unsigned> ForceTargetMaxScalarInterleaveFactor(
266     "force-target-max-scalar-interleave", cl::init(0), cl::Hidden,
267     cl::desc("A flag that overrides the target's max interleave factor for "
268              "scalar loops."));
269 
270 static cl::opt<unsigned> ForceTargetMaxVectorInterleaveFactor(
271     "force-target-max-vector-interleave", cl::init(0), cl::Hidden,
272     cl::desc("A flag that overrides the target's max interleave factor for "
273              "vectorized loops."));
274 
275 static cl::opt<unsigned> ForceTargetInstructionCost(
276     "force-target-instruction-cost", cl::init(0), cl::Hidden,
277     cl::desc("A flag that overrides the target's expected cost for "
278              "an instruction to a single constant value. Mostly "
279              "useful for getting consistent testing."));
280 
281 static cl::opt<bool> ForceTargetSupportsScalableVectors(
282     "force-target-supports-scalable-vectors", cl::init(false), cl::Hidden,
283     cl::desc(
284         "Pretend that scalable vectors are supported, even if the target does "
285         "not support them. This flag should only be used for testing."));
286 
287 static cl::opt<unsigned> SmallLoopCost(
288     "small-loop-cost", cl::init(20), cl::Hidden,
289     cl::desc(
290         "The cost of a loop that is considered 'small' by the interleaver."));
291 
292 static cl::opt<bool> LoopVectorizeWithBlockFrequency(
293     "loop-vectorize-with-block-frequency", cl::init(true), cl::Hidden,
294     cl::desc("Enable the use of the block frequency analysis to access PGO "
295              "heuristics minimizing code growth in cold regions and being more "
296              "aggressive in hot regions."));
297 
298 // Runtime interleave loops for load/store throughput.
299 static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
300     "enable-loadstore-runtime-interleave", cl::init(true), cl::Hidden,
301     cl::desc(
302         "Enable runtime interleaving until load/store ports are saturated"));
303 
304 /// Interleave small loops with scalar reductions.
305 static cl::opt<bool> InterleaveSmallLoopScalarReduction(
306     "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
307     cl::desc("Enable interleaving for loops with small iteration counts that "
308              "contain scalar reductions to expose ILP."));
309 
310 /// The number of stores in a loop that are allowed to need predication.
311 static cl::opt<unsigned> NumberOfStoresToPredicate(
312     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
313     cl::desc("Max number of stores to be predicated behind an if."));
314 
315 static cl::opt<bool> EnableIndVarRegisterHeur(
316     "enable-ind-var-reg-heur", cl::init(true), cl::Hidden,
317     cl::desc("Count the induction variable only once when interleaving"));
318 
319 static cl::opt<bool> EnableCondStoresVectorization(
320     "enable-cond-stores-vec", cl::init(true), cl::Hidden,
321     cl::desc("Enable if predication of stores during vectorization."));
322 
323 static cl::opt<unsigned> MaxNestedScalarReductionIC(
324     "max-nested-scalar-reduction-interleave", cl::init(2), cl::Hidden,
325     cl::desc("The maximum interleave count to use when interleaving a scalar "
326              "reduction in a nested loop."));
327 
328 static cl::opt<bool>
329     PreferInLoopReductions("prefer-inloop-reductions", cl::init(false),
330                            cl::Hidden,
331                            cl::desc("Prefer in-loop vector reductions, "
332                                     "overriding the target's preference."));
333 
334 static cl::opt<bool> ForceOrderedReductions(
335     "force-ordered-reductions", cl::init(false), cl::Hidden,
336     cl::desc("Enable the vectorization of loops with in-order (strict) "
337              "FP reductions"));
338 
339 static cl::opt<bool> PreferPredicatedReductionSelect(
340     "prefer-predicated-reduction-select", cl::init(false), cl::Hidden,
341     cl::desc(
342         "Prefer predicating a reduction operation over an after-loop select."));
343 
344 cl::opt<bool> EnableVPlanNativePath(
345     "enable-vplan-native-path", cl::init(false), cl::Hidden,
346     cl::desc("Enable VPlan-native vectorization path with "
347              "support for outer loop vectorization."));
348 
349 // FIXME: Remove this switch once we have divergence analysis. Currently we
350 // assume divergent non-backedge branches when this switch is true.
351 cl::opt<bool> EnableVPlanPredication(
352     "enable-vplan-predication", cl::init(false), cl::Hidden,
353     cl::desc("Enable VPlan-native vectorization path predicator with "
354              "support for outer loop vectorization."));
355 
356 // This flag enables the stress testing of the VPlan H-CFG construction in the
357 // VPlan-native vectorization path. It must be used in conjunction with
358 // -enable-vplan-native-path. -vplan-verify-hcfg can also be used to enable the
359 // verification of the H-CFGs built.
360 static cl::opt<bool> VPlanBuildStressTest(
361     "vplan-build-stress-test", cl::init(false), cl::Hidden,
362     cl::desc(
363         "Build VPlan for every supported loop nest in the function and bail "
364         "out right after the build (stress test the VPlan H-CFG construction "
365         "in the VPlan-native vectorization path)."));
366 
367 cl::opt<bool> llvm::EnableLoopInterleaving(
368     "interleave-loops", cl::init(true), cl::Hidden,
369     cl::desc("Enable loop interleaving in Loop vectorization passes"));
370 cl::opt<bool> llvm::EnableLoopVectorization(
371     "vectorize-loops", cl::init(true), cl::Hidden,
372     cl::desc("Run the Loop vectorization passes"));
373 
374 cl::opt<bool> PrintVPlansInDotFormat(
375     "vplan-print-in-dot-format", cl::init(false), cl::Hidden,
376     cl::desc("Use dot format instead of plain text when dumping VPlans"));
377 
378 /// A helper function that returns true if the given type is irregular. The
379 /// type is irregular if its allocated size doesn't equal the store size of an
380 /// element of the corresponding vector type.
381 static bool hasIrregularType(Type *Ty, const DataLayout &DL) {
382   // Determine if an array of N elements of type Ty is "bitcast compatible"
383   // with a <N x Ty> vector.
384   // This is only true if there is no padding between the array elements.
385   return DL.getTypeAllocSizeInBits(Ty) != DL.getTypeSizeInBits(Ty);
386 }
387 
388 /// A helper function that returns the reciprocal of the block probability of
389 /// predicated blocks. If we return X, we are assuming the predicated block
390 /// will execute once for every X iterations of the loop header.
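/// For example, returning 2 effectively weights a predicated block at half
/// its cost when estimating the cost of a single loop iteration.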
391 ///
392 /// TODO: We should use actual block probability here, if available. Currently,
393 ///       we always assume predicated blocks have a 50% chance of executing.
394 static unsigned getReciprocalPredBlockProb() { return 2; }
395 
396 /// A helper function that returns an integer or floating-point constant with
397 /// value C.
398 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
399   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
400                            : ConstantFP::get(Ty, C);
401 }
402 
403 /// Returns "best known" trip count for the specified loop \p L as defined by
404 /// the following procedure:
405 ///   1) Returns exact trip count if it is known.
406 ///   2) Returns expected trip count according to profile data if any.
407 ///   3) Returns upper bound estimate if it is known.
408 ///   4) Returns None if all of the above failed.
409 static Optional<unsigned> getSmallBestKnownTC(ScalarEvolution &SE, Loop *L) {
410   // Check if exact trip count is known.
411   if (unsigned ExpectedTC = SE.getSmallConstantTripCount(L))
412     return ExpectedTC;
413 
414   // Check if there is an expected trip count available from profile data.
415   if (LoopVectorizeWithBlockFrequency)
416     if (auto EstimatedTC = getLoopEstimatedTripCount(L))
417       return EstimatedTC;
418 
419   // Check if upper bound estimate is known.
420   if (unsigned ExpectedTC = SE.getSmallConstantMaxTripCount(L))
421     return ExpectedTC;
422 
423   return None;
424 }
425 
426 // Forward declare GeneratedRTChecks.
427 class GeneratedRTChecks;
428 
429 namespace llvm {
430 
431 /// InnerLoopVectorizer vectorizes loops which contain only one basic
432 /// block to a specified vectorization factor (VF).
433 /// This class performs the widening of scalars into vectors, or multiple
434 /// scalars. This class also implements the following features:
435 /// * It inserts an epilogue loop for handling loops that don't have iteration
436 ///   counts that are known to be a multiple of the vectorization factor.
437 /// * It handles the code generation for reduction variables.
438 /// * Scalarization (implementation using scalars) of un-vectorizable
439 ///   instructions.
440 /// InnerLoopVectorizer does not perform any vectorization-legality
441 /// checks, and relies on the caller to check for the different legality
442 /// aspects. The InnerLoopVectorizer relies on the
443 /// LoopVectorizationLegality class to provide information about the induction
444 /// and reduction variables that were found for a given vectorization factor.
445 class InnerLoopVectorizer {
446 public:
447   InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
448                       LoopInfo *LI, DominatorTree *DT,
449                       const TargetLibraryInfo *TLI,
450                       const TargetTransformInfo *TTI, AssumptionCache *AC,
451                       OptimizationRemarkEmitter *ORE, ElementCount VecWidth,
452                       unsigned UnrollFactor, LoopVectorizationLegality *LVL,
453                       LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
454                       ProfileSummaryInfo *PSI, GeneratedRTChecks &RTChecks)
455       : OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
456         AC(AC), ORE(ORE), VF(VecWidth), UF(UnrollFactor),
457         Builder(PSE.getSE()->getContext()), Legal(LVL), Cost(CM), BFI(BFI),
458         PSI(PSI), RTChecks(RTChecks) {
459     // Query this against the original loop and save it here because the profile
460     // of the original loop header may change as the transformation happens.
461     OptForSizeBasedOnProfile = llvm::shouldOptimizeForSize(
462         OrigLoop->getHeader(), PSI, BFI, PGSOQueryType::IRPass);
463   }
464 
465   virtual ~InnerLoopVectorizer() = default;
466 
467   /// Create a new empty loop that will contain vectorized instructions later
468   /// on, while the old loop will be used as the scalar remainder. Control flow
469   /// is generated around the vectorized (and scalar epilogue) loops consisting
470   /// of various checks and bypasses. Return the pre-header block of the new
471   /// loop.
472   /// In the case of epilogue vectorization, this function is overridden to
473   /// handle the more complex control flow around the loops.
474   virtual BasicBlock *createVectorizedLoopSkeleton();
475 
476   /// Widen a single call instruction within the innermost loop.
477   void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands,
478                             VPTransformState &State);
479 
480   /// Fix the vectorized code, taking care of header phis, live-outs, and more.
481   void fixVectorizedLoop(VPTransformState &State);
482 
483   // Return true if any runtime check is added.
484   bool areSafetyChecksAdded() { return AddedSafetyChecks; }
485 
486   /// A type for vectorized values in the new loop. Each value from the
487   /// original loop, when vectorized, is represented by UF vector values in the
488   /// new unrolled loop, where UF is the unroll factor.
489   using VectorParts = SmallVector<Value *, 2>;
490 
491   /// Vectorize a single first-order recurrence or pointer induction PHINode in
492   /// a block. This method handles the induction variable canonicalization. It
493   /// supports both VF = 1 for unrolled loops and arbitrary length vectors.
494   void widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR,
495                            VPTransformState &State);
496 
497   /// A helper function to scalarize a single Instruction in the innermost loop.
498   /// Generates a sequence of scalar instances for each lane between \p MinLane
499   /// and \p MaxLane, times each part between \p MinPart and \p MaxPart,
500   /// inclusive. Uses the VPValue operands from \p RepRecipe instead of \p
501   /// Instr's operands.
502   void scalarizeInstruction(Instruction *Instr, VPReplicateRecipe *RepRecipe,
503                             const VPIteration &Instance, bool IfPredicateInstr,
504                             VPTransformState &State);
505 
506   /// Widen an integer or floating-point induction variable \p IV. If \p Trunc
507   /// is provided, the integer induction variable will first be truncated to
508   /// the corresponding type.
509   void widenIntOrFpInduction(PHINode *IV, Value *Start, TruncInst *Trunc,
510                              VPValue *Def, VPValue *CastDef,
511                              VPTransformState &State);
512 
513   /// Construct the vector value of a scalarized value \p V one lane at a time.
514   void packScalarIntoVectorValue(VPValue *Def, const VPIteration &Instance,
515                                  VPTransformState &State);
516 
517   /// Try to vectorize interleaved access group \p Group with the base address
518   /// given in \p Addr, optionally masking the vector operations if \p
519   /// BlockInMask is non-null. Use \p State to translate given VPValues to IR
520   /// values in the vectorized loop.
521   void vectorizeInterleaveGroup(const InterleaveGroup<Instruction> *Group,
522                                 ArrayRef<VPValue *> VPDefs,
523                                 VPTransformState &State, VPValue *Addr,
524                                 ArrayRef<VPValue *> StoredValues,
525                                 VPValue *BlockInMask = nullptr);
526 
527   /// Set the debug location in the builder using the debug location of \p V.
528   /// If \p CustomBuilder is None, the class member's Builder is used.
529   void setDebugLocFromInst(const Value *V,
530                            Optional<IRBuilder<> *> CustomBuilder = None);
531 
532   /// Fix the non-induction PHIs in the OrigPHIsToFix vector.
533   void fixNonInductionPHIs(VPTransformState &State);
534 
535   /// Returns true if the reordering of FP operations is not allowed, but we are
536   /// able to vectorize with strict in-order reductions for the given RdxDesc.
537   bool useOrderedReductions(RecurrenceDescriptor &RdxDesc);
538 
539   /// Create a broadcast instruction. This method generates a broadcast
540   /// instruction (shuffle) for loop invariant values and for the induction
541   /// value. If this is the induction variable then we extend it to N, N+1, ...
542   /// this is needed because each iteration in the loop corresponds to a SIMD
543   /// element.
544   virtual Value *getBroadcastInstrs(Value *V);
545 
546   /// Add metadata from one instruction to another.
547   ///
548   /// This includes both the original MDs from \p From and additional ones (\see
549   /// addNewMetadata).  Use this for *newly created* instructions in the vector
550   /// loop.
551   void addMetadata(Instruction *To, Instruction *From);
552 
553   /// Similar to the previous function but it adds the metadata to a
554   /// vector of instructions.
555   void addMetadata(ArrayRef<Value *> To, Instruction *From);
556 
557 protected:
558   friend class LoopVectorizationPlanner;
559 
560   /// A small list of PHINodes.
561   using PhiVector = SmallVector<PHINode *, 4>;
562 
563   /// A type for scalarized values in the new loop. Each value from the
564   /// original loop, when scalarized, is represented by UF x VF scalar values
565   /// in the new unrolled loop, where UF is the unroll factor and VF is the
566   /// vectorization factor.
567   using ScalarParts = SmallVector<SmallVector<Value *, 4>, 2>;
568 
569   /// Set up the values of the IVs correctly when exiting the vector loop.
570   void fixupIVUsers(PHINode *OrigPhi, const InductionDescriptor &II,
571                     Value *CountRoundDown, Value *EndValue,
572                     BasicBlock *MiddleBlock);
573 
574   /// Create a new induction variable inside L.
575   PHINode *createInductionVariable(Loop *L, Value *Start, Value *End,
576                                    Value *Step, Instruction *DL);
577 
578   /// Handle all cross-iteration phis in the header.
579   void fixCrossIterationPHIs(VPTransformState &State);
580 
581   /// Create the exit value of first order recurrences in the middle block and
582   /// update their users.
583   void fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR, VPTransformState &State);
584 
585   /// Create code for the loop exit value of the reduction.
586   void fixReduction(VPReductionPHIRecipe *Phi, VPTransformState &State);
587 
588   /// Clear NSW/NUW flags from reduction instructions if necessary.
589   void clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
590                                VPTransformState &State);
591 
592   /// Fixup the LCSSA phi nodes in the unique exit block.  This simply
593   /// means we need to add the appropriate incoming value from the middle
594   /// block as exiting edges from the scalar epilogue loop (if present) are
595   /// already in place, and we exit the vector loop exclusively to the middle
596   /// block.
597   void fixLCSSAPHIs(VPTransformState &State);
598 
599   /// Iteratively sink the scalarized operands of a predicated instruction into
600   /// the block that was created for it.
601   void sinkScalarOperands(Instruction *PredInst);
602 
603   /// Shrinks vector element sizes to the smallest bitwidth they can be legally
604   /// represented as.
605   void truncateToMinimalBitwidths(VPTransformState &State);
606 
607   /// This function adds
608   /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
609   /// to each vector element of Val. The sequence starts at \p StartIdx.
610   /// \p Opcode is relevant for FP induction variables.
611   virtual Value *
612   getStepVector(Value *Val, Value *StartIdx, Value *Step,
613                 Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd);
614 
615   /// Compute scalar induction steps. \p ScalarIV is the scalar induction
616   /// variable on which to base the steps, \p Step is the size of the step, and
617   /// \p EntryVal is the value from the original loop that maps to the steps.
618   /// Note that \p EntryVal doesn't have to be an induction variable - it
619   /// can also be a truncate instruction.
620   void buildScalarSteps(Value *ScalarIV, Value *Step, Instruction *EntryVal,
621                         const InductionDescriptor &ID, VPValue *Def,
622                         VPValue *CastDef, VPTransformState &State);
623 
624   /// Create a vector induction phi node based on an existing scalar one. \p
625   /// EntryVal is the value from the original loop that maps to the vector phi
626   /// node, and \p Step is the loop-invariant step. If \p EntryVal is a
627   /// truncate instruction, instead of widening the original IV, we widen a
628   /// version of the IV truncated to \p EntryVal's type.
629   void createVectorIntOrFpInductionPHI(const InductionDescriptor &II,
630                                        Value *Step, Value *Start,
631                                        Instruction *EntryVal, VPValue *Def,
632                                        VPValue *CastDef,
633                                        VPTransformState &State);
634 
635   /// Returns true if an instruction \p I should be scalarized instead of
636   /// vectorized for the chosen vectorization factor.
637   bool shouldScalarizeInstruction(Instruction *I) const;
638 
639   /// Returns true if we should generate a scalar version of \p IV.
640   bool needsScalarInduction(Instruction *IV) const;
641 
642   /// If there is a cast involved in the induction variable \p ID, which should
643   /// be ignored in the vectorized loop body, this function records the
644   /// VectorLoopValue of the respective Phi also as the VectorLoopValue of the
645   /// cast. We had already proved that the casted Phi is equal to the uncasted
646   /// Phi in the vectorized loop (under a runtime guard), and therefore
647   /// there is no need to vectorize the cast - the same value can be used in the
648   /// vector loop for both the Phi and the cast.
649   /// If \p VectorLoopValue is a scalarized value, \p Lane is also specified.
650   /// Otherwise, \p VectorLoopValue is a widened/vectorized value.
651   ///
652   /// \p EntryVal is the value from the original loop that maps to the vector
653   /// phi node and is used to distinguish what is the IV currently being
654   /// processed - original one (if \p EntryVal is a phi corresponding to the
655   /// original IV) or the "newly-created" one based on the proof mentioned above
656   /// (see also buildScalarSteps() and createVectorIntOrFPInductionPHI()). In the
657   /// latter case \p EntryVal is a TruncInst and we must not record anything for
658   /// that IV, but it's error-prone to expect callers of this routine to care
659   /// about that, hence this explicit parameter.
660   void recordVectorLoopValueForInductionCast(
661       const InductionDescriptor &ID, const Instruction *EntryVal,
662       Value *VectorLoopValue, VPValue *CastDef, VPTransformState &State,
663       unsigned Part, unsigned Lane = UINT_MAX);
664 
665   /// Generate a shuffle sequence that will reverse the vector Vec.
666   virtual Value *reverseVector(Value *Vec);
667 
668   /// Returns (and creates if needed) the original loop trip count.
669   Value *getOrCreateTripCount(Loop *NewLoop);
670 
671   /// Returns (and creates if needed) the trip count of the widened loop.
672   Value *getOrCreateVectorTripCount(Loop *NewLoop);
673 
674   /// Returns a bitcasted value to the requested vector type.
675   /// Also handles bitcasts of vector<float> <-> vector<pointer> types.
676   Value *createBitOrPointerCast(Value *V, VectorType *DstVTy,
677                                 const DataLayout &DL);
678 
679   /// Emit a bypass check to see if the vector trip count is zero, including if
680   /// it overflows.
681   void emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass);
682 
683   /// Emit a bypass check to see if all of the SCEV assumptions we've
684   /// had to make are correct. Returns the block containing the checks or
685   /// nullptr if no checks have been added.
686   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass);
687 
688   /// Emit bypass checks to check any memory assumptions we may have made.
689   /// Returns the block containing the checks or nullptr if no checks have been
690   /// added.
691   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass);
692 
693   /// Compute the transformed value of Index at offset StartValue using step
694   /// StepValue.
695   /// For integer induction, returns StartValue + Index * StepValue.
696   /// For pointer induction, returns StartValue[Index * StepValue].
697   /// FIXME: The newly created binary instructions should contain nsw/nuw
698   /// flags, which can be found from the original scalar operations.
699   Value *emitTransformedIndex(IRBuilder<> &B, Value *Index, ScalarEvolution *SE,
700                               const DataLayout &DL,
701                               const InductionDescriptor &ID) const;
702 
703   /// Emit basic blocks (prefixed with \p Prefix) for the iteration check,
704   /// vector loop preheader, middle block and scalar preheader. Also
705   /// allocate a loop object for the new vector loop and return it.
706   Loop *createVectorLoopSkeleton(StringRef Prefix);
707 
708   /// Create new phi nodes for the induction variables to resume iteration count
709   /// in the scalar epilogue, from where the vectorized loop left off (given by
710   /// \p VectorTripCount).
711   /// In cases where the loop skeleton is more complicated (e.g. epilogue
712   /// vectorization) and the resume values can come from an additional bypass
713   /// block, the \p AdditionalBypass pair provides information about the bypass
714   /// block and the end value on the edge from bypass to this loop.
715   void createInductionResumeValues(
716       Loop *L, Value *VectorTripCount,
717       std::pair<BasicBlock *, Value *> AdditionalBypass = {nullptr, nullptr});
718 
719   /// Complete the loop skeleton by adding debug MDs, creating appropriate
720   /// conditional branches in the middle block, preparing the builder and
721   /// running the verifier. Take in the vector loop \p L as argument, and return
722   /// the preheader of the completed vector loop.
723   BasicBlock *completeLoopSkeleton(Loop *L, MDNode *OrigLoopID);
724 
725   /// Add additional metadata to \p To that was not present on \p Orig.
726   ///
727   /// Currently this is used to add the noalias annotations based on the
728   /// inserted memchecks.  Use this for instructions that are *cloned* into the
729   /// vector loop.
730   void addNewMetadata(Instruction *To, const Instruction *Orig);
731 
732   /// Collect poison-generating recipes that may generate a poison value that is
733   /// used after vectorization, even when their operands are not poison. Those
734   /// recipes meet the following conditions:
735   ///  * Contribute to the address computation of a recipe generating a widen
736   ///    memory load/store (VPWidenMemoryInstructionRecipe or
737   ///    VPInterleaveRecipe).
738   ///  * Such a widen memory load/store has at least one underlying Instruction
739   ///    that is in a basic block that needs predication and after vectorization
740   ///    the generated instruction won't be predicated.
741   void collectPoisonGeneratingRecipes(VPTransformState &State);
742 
743   /// Allow subclasses to override and print debug traces before/after vplan
744   /// execution, when trace information is requested.
745   virtual void printDebugTracesAtStart() {}
746   virtual void printDebugTracesAtEnd() {}
747 
748   /// The original loop.
749   Loop *OrigLoop;
750 
751   /// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
752   /// dynamic knowledge to simplify SCEV expressions and converts them to a
753   /// more usable form.
754   PredicatedScalarEvolution &PSE;
755 
756   /// Loop Info.
757   LoopInfo *LI;
758 
759   /// Dominator Tree.
760   DominatorTree *DT;
761 
762   /// Alias Analysis.
763   AAResults *AA;
764 
765   /// Target Library Info.
766   const TargetLibraryInfo *TLI;
767 
768   /// Target Transform Info.
769   const TargetTransformInfo *TTI;
770 
771   /// Assumption Cache.
772   AssumptionCache *AC;
773 
774   /// Interface to emit optimization remarks.
775   OptimizationRemarkEmitter *ORE;
776 
777   /// LoopVersioning.  It's only set up (non-null) if memchecks were
778   /// used.
779   ///
780   /// This is currently only used to add no-alias metadata based on the
781   /// memchecks.  The actual versioning is performed manually.
782   std::unique_ptr<LoopVersioning> LVer;
783 
784   /// The vectorization SIMD factor to use. Each vector will have this many
785   /// vector elements.
786   ElementCount VF;
787 
788   /// The vectorization unroll factor to use. Each scalar is vectorized to this
789   /// many different vector instructions.
790   unsigned UF;
791 
792   /// The builder that we use
793   IRBuilder<> Builder;
794 
795   // --- Vectorization state ---
796 
797   /// The vector-loop preheader.
798   BasicBlock *LoopVectorPreHeader;
799 
800   /// The scalar-loop preheader.
801   BasicBlock *LoopScalarPreHeader;
802 
803   /// Middle Block between the vector and the scalar.
804   BasicBlock *LoopMiddleBlock;
805 
806   /// The unique ExitBlock of the scalar loop if one exists.  Note that
807   /// there can be multiple exiting edges reaching this block.
808   BasicBlock *LoopExitBlock;
809 
810   /// The vector loop body.
811   BasicBlock *LoopVectorBody;
812 
813   /// The scalar loop body.
814   BasicBlock *LoopScalarBody;
815 
816   /// A list of all bypass blocks. The first block is the entry of the loop.
817   SmallVector<BasicBlock *, 4> LoopBypassBlocks;
818 
819   /// The new Induction variable which was added to the new block.
820   PHINode *Induction = nullptr;
821 
822   /// The induction variable of the old basic block.
823   PHINode *OldInduction = nullptr;
824 
825   /// Store instructions that were predicated.
826   SmallVector<Instruction *, 4> PredicatedInstructions;
827 
828   /// Trip count of the original loop.
829   Value *TripCount = nullptr;
830 
831   /// Trip count of the widened loop (TripCount - TripCount % (VF*UF))
832   Value *VectorTripCount = nullptr;
833 
834   /// The legality analysis.
835   LoopVectorizationLegality *Legal;
836 
837   /// The profitability analysis.
838   LoopVectorizationCostModel *Cost;
839 
840   // Record whether runtime checks are added.
841   bool AddedSafetyChecks = false;
842 
843   // Holds the end values for each induction variable. We save the end values
844   // so we can later fix-up the external users of the induction variables.
845   DenseMap<PHINode *, Value *> IVEndValues;
846 
847   // Vector of original scalar PHIs whose corresponding widened PHIs need to be
848   // fixed up at the end of vector code generation.
849   SmallVector<PHINode *, 8> OrigPHIsToFix;
850 
851   /// BFI and PSI are used to check for profile guided size optimizations.
852   BlockFrequencyInfo *BFI;
853   ProfileSummaryInfo *PSI;
854 
855   // Whether this loop should be optimized for size based on profile guided size
856   // optimizations.
857   bool OptForSizeBasedOnProfile;
858 
859   /// Structure to hold information about generated runtime checks, responsible
860   /// for cleaning up the checks if vectorization turns out to be unprofitable.
861   GeneratedRTChecks &RTChecks;
862 };
863 
864 class InnerLoopUnroller : public InnerLoopVectorizer {
865 public:
866   InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
867                     LoopInfo *LI, DominatorTree *DT,
868                     const TargetLibraryInfo *TLI,
869                     const TargetTransformInfo *TTI, AssumptionCache *AC,
870                     OptimizationRemarkEmitter *ORE, unsigned UnrollFactor,
871                     LoopVectorizationLegality *LVL,
872                     LoopVectorizationCostModel *CM, BlockFrequencyInfo *BFI,
873                     ProfileSummaryInfo *PSI, GeneratedRTChecks &Check)
874       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
875                             ElementCount::getFixed(1), UnrollFactor, LVL, CM,
876                             BFI, PSI, Check) {}
877 
878 private:
879   Value *getBroadcastInstrs(Value *V) override;
880   Value *getStepVector(
881       Value *Val, Value *StartIdx, Value *Step,
882       Instruction::BinaryOps Opcode = Instruction::BinaryOpsEnd) override;
883   Value *reverseVector(Value *Vec) override;
884 };
885 
886 /// Encapsulate information regarding vectorization of a loop and its epilogue.
887 /// This information is meant to be updated and used across two stages of
888 /// epilogue vectorization.
889 struct EpilogueLoopVectorizationInfo {
890   ElementCount MainLoopVF = ElementCount::getFixed(0);
891   unsigned MainLoopUF = 0;
892   ElementCount EpilogueVF = ElementCount::getFixed(0);
893   unsigned EpilogueUF = 0;
894   BasicBlock *MainLoopIterationCountCheck = nullptr;
895   BasicBlock *EpilogueIterationCountCheck = nullptr;
896   BasicBlock *SCEVSafetyCheck = nullptr;
897   BasicBlock *MemSafetyCheck = nullptr;
898   Value *TripCount = nullptr;
899   Value *VectorTripCount = nullptr;
900 
901   EpilogueLoopVectorizationInfo(ElementCount MVF, unsigned MUF,
902                                 ElementCount EVF, unsigned EUF)
903       : MainLoopVF(MVF), MainLoopUF(MUF), EpilogueVF(EVF), EpilogueUF(EUF) {
904     assert(EUF == 1 &&
905            "A high UF for the epilogue loop is likely not beneficial.");
906   }
907 };
908 
909 /// An extension of the inner loop vectorizer that creates a skeleton for a
910 /// vectorized loop that has its epilogue (residual) also vectorized.
911 /// The idea is to run the vplan on a given loop twice, first to set up the
912 /// skeleton and vectorize the main loop, and second to complete the skeleton
913 /// from the first step and vectorize the epilogue.  This is achieved by
914 /// deriving two concrete strategy classes from this base class and invoking
915 /// them in succession from the loop vectorizer planner.
916 class InnerLoopAndEpilogueVectorizer : public InnerLoopVectorizer {
917 public:
918   InnerLoopAndEpilogueVectorizer(
919       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
920       DominatorTree *DT, const TargetLibraryInfo *TLI,
921       const TargetTransformInfo *TTI, AssumptionCache *AC,
922       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
923       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
924       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
925       GeneratedRTChecks &Checks)
926       : InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
927                             EPI.MainLoopVF, EPI.MainLoopUF, LVL, CM, BFI, PSI,
928                             Checks),
929         EPI(EPI) {}
930 
931   // Override this function to handle the more complex control flow around the
932   // three loops.
933   BasicBlock *createVectorizedLoopSkeleton() final override {
934     return createEpilogueVectorizedLoopSkeleton();
935   }
936 
937   /// The interface for creating a vectorized skeleton using one of two
938   /// different strategies, each corresponding to one execution of the vplan
939   /// as described above.
940   virtual BasicBlock *createEpilogueVectorizedLoopSkeleton() = 0;
941 
942   /// Holds and updates state information required to vectorize the main loop
943   /// and its epilogue in two separate passes. This setup helps us avoid
944   /// regenerating and recomputing runtime safety checks. It also helps us to
945   /// shorten the iteration-count-check path length for the cases where the
946   /// iteration count of the loop is so small that the main vector loop is
947   /// completely skipped.
948   EpilogueLoopVectorizationInfo &EPI;
949 };
950 
951 /// A specialized derived class of inner loop vectorizer that performs
952 /// vectorization of *main* loops in the process of vectorizing loops and their
953 /// epilogues.
954 class EpilogueVectorizerMainLoop : public InnerLoopAndEpilogueVectorizer {
955 public:
956   EpilogueVectorizerMainLoop(
957       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
958       DominatorTree *DT, const TargetLibraryInfo *TLI,
959       const TargetTransformInfo *TTI, AssumptionCache *AC,
960       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
961       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
962       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
963       GeneratedRTChecks &Check)
964       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
965                                        EPI, LVL, CM, BFI, PSI, Check) {}
966   /// Implements the interface for creating a vectorized skeleton using the
967   /// *main loop* strategy (i.e. the first pass of vplan execution).
968   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
969 
970 protected:
971   /// Emits an iteration count bypass check once for the main loop (when \p
972   /// ForEpilogue is false) and once for the epilogue loop (when \p
973   /// ForEpilogue is true).
974   BasicBlock *emitMinimumIterationCountCheck(Loop *L, BasicBlock *Bypass,
975                                              bool ForEpilogue);
976   void printDebugTracesAtStart() override;
977   void printDebugTracesAtEnd() override;
978 };
979 
980 // A specialized derived class of inner loop vectorizer that performs
981 // vectorization of *epilogue* loops in the process of vectorizing loops and
982 // their epilogues.
983 class EpilogueVectorizerEpilogueLoop : public InnerLoopAndEpilogueVectorizer {
984 public:
985   EpilogueVectorizerEpilogueLoop(
986       Loop *OrigLoop, PredicatedScalarEvolution &PSE, LoopInfo *LI,
987       DominatorTree *DT, const TargetLibraryInfo *TLI,
988       const TargetTransformInfo *TTI, AssumptionCache *AC,
989       OptimizationRemarkEmitter *ORE, EpilogueLoopVectorizationInfo &EPI,
990       LoopVectorizationLegality *LVL, llvm::LoopVectorizationCostModel *CM,
991       BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
992       GeneratedRTChecks &Checks)
993       : InnerLoopAndEpilogueVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, AC, ORE,
994                                        EPI, LVL, CM, BFI, PSI, Checks) {}
995   /// Implements the interface for creating a vectorized skeleton using the
996   /// *epilogue loop* strategy (i.e. the second pass of vplan execution).
997   BasicBlock *createEpilogueVectorizedLoopSkeleton() final override;
998 
999 protected:
1000   /// Emits an iteration count bypass check after the main vector loop has
1001   /// finished to see if there are any iterations left to execute by either
1002   /// the vector epilogue or the scalar epilogue.
1003   BasicBlock *emitMinimumVectorEpilogueIterCountCheck(Loop *L,
1004                                                       BasicBlock *Bypass,
1005                                                       BasicBlock *Insert);
1006   void printDebugTracesAtStart() override;
1007   void printDebugTracesAtEnd() override;
1008 };
1009 } // end namespace llvm
1010 
1011 /// Look for a meaningful debug location on the instruction or its
1012 /// operands.
1013 static Instruction *getDebugLocFromInstOrOperands(Instruction *I) {
1014   if (!I)
1015     return I;
1016 
1017   DebugLoc Empty;
1018   if (I->getDebugLoc() != Empty)
1019     return I;
1020 
1021   for (Use &Op : I->operands()) {
1022     if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1023       if (OpInst->getDebugLoc() != Empty)
1024         return OpInst;
1025   }
1026 
1027   return I;
1028 }
1029 
1030 void InnerLoopVectorizer::setDebugLocFromInst(
1031     const Value *V, Optional<IRBuilder<> *> CustomBuilder) {
1032   IRBuilder<> *B = (CustomBuilder == None) ? &Builder : *CustomBuilder;
1033   if (const Instruction *Inst = dyn_cast_or_null<Instruction>(V)) {
1034     const DILocation *DIL = Inst->getDebugLoc();
1035 
1036     // When an FSDiscriminator is enabled, we don't need to add the multiply
1037     // factors to the discriminators.
1038     if (DIL && Inst->getFunction()->isDebugInfoForProfiling() &&
1039         !isa<DbgInfoIntrinsic>(Inst) && !EnableFSDiscriminator) {
1040       // FIXME: For scalable vectors, assume vscale=1.
1041       auto NewDIL =
1042           DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
1043       if (NewDIL)
1044         B->SetCurrentDebugLocation(NewDIL.getValue());
1045       else
1046         LLVM_DEBUG(dbgs()
1047                    << "Failed to create new discriminator: "
1048                    << DIL->getFilename() << " Line: " << DIL->getLine());
1049     } else
1050       B->SetCurrentDebugLocation(DIL);
1051   } else
1052     B->SetCurrentDebugLocation(DebugLoc());
1053 }
1054 
1055 /// Write a \p DebugMsg about vectorization to the debug output stream. If \p I
1056 /// is passed, the message relates to that particular instruction.
1057 #ifndef NDEBUG
1058 static void debugVectorizationMessage(const StringRef Prefix,
1059                                       const StringRef DebugMsg,
1060                                       Instruction *I) {
1061   dbgs() << "LV: " << Prefix << DebugMsg;
1062   if (I != nullptr)
1063     dbgs() << " " << *I;
1064   else
1065     dbgs() << '.';
1066   dbgs() << '\n';
1067 }
1068 #endif
1069 
1070 /// Create an analysis remark that explains why vectorization failed
1071 ///
1072 /// \p PassName is the name of the pass (e.g. can be AlwaysPrint).  \p
1073 /// RemarkName is the identifier for the remark.  If \p I is passed it is an
1074 /// instruction that prevents vectorization.  Otherwise \p TheLoop is used for
1075 /// the location of the remark.  \return the remark object that can be
1076 /// streamed to.
1077 static OptimizationRemarkAnalysis createLVAnalysis(const char *PassName,
1078     StringRef RemarkName, Loop *TheLoop, Instruction *I) {
1079   Value *CodeRegion = TheLoop->getHeader();
1080   DebugLoc DL = TheLoop->getStartLoc();
1081 
1082   if (I) {
1083     CodeRegion = I->getParent();
1084     // If there is no debug location attached to the instruction, fall back to
1085     // using the loop's.
1086     if (I->getDebugLoc())
1087       DL = I->getDebugLoc();
1088   }
1089 
1090   return OptimizationRemarkAnalysis(PassName, RemarkName, DL, CodeRegion);
1091 }
1092 
1093 /// Return a value for Step multiplied by VF.
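/// For example (illustrative), with Step = 2 and a fixed VF of 4 this returns
/// the constant 8; with a scalable VF of <vscale x 4> it returns 8 * vscale.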
1094 static Value *createStepForVF(IRBuilder<> &B, Type *Ty, ElementCount VF,
1095                               int64_t Step) {
1096   assert(Ty->isIntegerTy() && "Expected an integer step");
1097   Constant *StepVal = ConstantInt::get(Ty, Step * VF.getKnownMinValue());
1098   return VF.isScalable() ? B.CreateVScale(StepVal) : StepVal;
1099 }
1100 
1101 namespace llvm {
1102 
1103 /// Return the runtime value for VF.
1104 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF) {
1105   Constant *EC = ConstantInt::get(Ty, VF.getKnownMinValue());
1106   return VF.isScalable() ? B.CreateVScale(EC) : EC;
1107 }
1108 
1109 static Value *getRuntimeVFAsFloat(IRBuilder<> &B, Type *FTy, ElementCount VF) {
1110   assert(FTy->isFloatingPointTy() && "Expected floating point type!");
1111   Type *IntTy = IntegerType::get(FTy->getContext(), FTy->getScalarSizeInBits());
1112   Value *RuntimeVF = getRuntimeVF(B, IntTy, VF);
1113   return B.CreateUIToFP(RuntimeVF, FTy);
1114 }
1115 
1116 void reportVectorizationFailure(const StringRef DebugMsg,
1117                                 const StringRef OREMsg, const StringRef ORETag,
1118                                 OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1119                                 Instruction *I) {
1120   LLVM_DEBUG(debugVectorizationMessage("Not vectorizing: ", DebugMsg, I));
1121   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1122   ORE->emit(
1123       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1124       << "loop not vectorized: " << OREMsg);
1125 }
1126 
1127 void reportVectorizationInfo(const StringRef Msg, const StringRef ORETag,
1128                              OptimizationRemarkEmitter *ORE, Loop *TheLoop,
1129                              Instruction *I) {
1130   LLVM_DEBUG(debugVectorizationMessage("", Msg, I));
1131   LoopVectorizeHints Hints(TheLoop, true /* doesn't matter */, *ORE);
1132   ORE->emit(
1133       createLVAnalysis(Hints.vectorizeAnalysisPassName(), ORETag, TheLoop, I)
1134       << Msg);
1135 }
1136 
1137 } // end namespace llvm
1138 
1139 #ifndef NDEBUG
1140 /// \return string containing a file name and a line # for the given loop.
1141 static std::string getDebugLocString(const Loop *L) {
1142   std::string Result;
1143   if (L) {
1144     raw_string_ostream OS(Result);
1145     if (const DebugLoc LoopDbgLoc = L->getStartLoc())
1146       LoopDbgLoc.print(OS);
1147     else
1148       // Just print the module name.
1149       OS << L->getHeader()->getParent()->getParent()->getModuleIdentifier();
1150     OS.flush();
1151   }
1152   return Result;
1153 }
1154 #endif
1155 
1156 void InnerLoopVectorizer::addNewMetadata(Instruction *To,
1157                                          const Instruction *Orig) {
1158   // If the loop was versioned with memchecks, add the corresponding no-alias
1159   // metadata.
1160   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
1161     LVer->annotateInstWithNoAlias(To, Orig);
1162 }
1163 
1164 void InnerLoopVectorizer::collectPoisonGeneratingRecipes(
1165     VPTransformState &State) {
1166 
1167   // Collect recipes in the backward slice of `Root` that may generate a poison
1168   // value that is used after vectorization.
1169   SmallPtrSet<VPRecipeBase *, 16> Visited;
1170   auto collectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1171     SmallVector<VPRecipeBase *, 16> Worklist;
1172     Worklist.push_back(Root);
1173 
1174     // Traverse the backward slice of Root through its use-def chain.
1175     while (!Worklist.empty()) {
1176       VPRecipeBase *CurRec = Worklist.back();
1177       Worklist.pop_back();
1178 
1179       if (!Visited.insert(CurRec).second)
1180         continue;
1181 
1182       // Prune search if we find another recipe generating a widen memory
1183       // instruction. Widen memory instructions involved in address computation
1184       // will lead to gather/scatter instructions, which don't need to be
1185       // handled.
1186       if (isa<VPWidenMemoryInstructionRecipe>(CurRec) ||
1187           isa<VPInterleaveRecipe>(CurRec))
1188         continue;
1189 
1190       // This recipe contributes to the address computation of a widen
1191       // load/store. Collect recipe if its underlying instruction has
1192       // poison-generating flags.
1193       Instruction *Instr = CurRec->getUnderlyingInstr();
1194       if (Instr && Instr->hasPoisonGeneratingFlags())
1195         State.MayGeneratePoisonRecipes.insert(CurRec);
1196 
1197       // Add new definitions to the worklist.
1198       for (VPValue *Operand : CurRec->operands())
1199         if (VPDef *OpDef = Operand->getDef())
1200           Worklist.push_back(cast<VPRecipeBase>(OpDef));
1201     }
1202   });
1203 
1204   // Traverse all the recipes in the VPlan and collect the poison-generating
1205   // recipes in the backward slice starting at the address of a
1206   // VPWidenMemoryInstructionRecipe or VPInterleaveRecipe.
1207   auto Iter = depth_first(
1208       VPBlockRecursiveTraversalWrapper<VPBlockBase *>(State.Plan->getEntry()));
1209   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1210     for (VPRecipeBase &Recipe : *VPBB) {
1211       if (auto *WidenRec = dyn_cast<VPWidenMemoryInstructionRecipe>(&Recipe)) {
1212         Instruction *UnderlyingInstr = WidenRec->getUnderlyingInstr();
1213         VPDef *AddrDef = WidenRec->getAddr()->getDef();
1214         if (AddrDef && WidenRec->isConsecutive() && UnderlyingInstr &&
1215             Legal->blockNeedsPredication(UnderlyingInstr->getParent()))
1216           collectPoisonGeneratingInstrsInBackwardSlice(
1217               cast<VPRecipeBase>(AddrDef));
1218       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1219         VPDef *AddrDef = InterleaveRec->getAddr()->getDef();
1220         if (AddrDef) {
1221           // Check if any member of the interleave group needs predication.
1222           const InterleaveGroup<Instruction> *InterGroup =
1223               InterleaveRec->getInterleaveGroup();
1224           bool NeedPredication = false;
1225           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1226                I < NumMembers; ++I) {
1227             Instruction *Member = InterGroup->getMember(I);
1228             if (Member)
1229               NeedPredication |=
1230                   Legal->blockNeedsPredication(Member->getParent());
1231           }
1232 
1233           if (NeedPredication)
1234             collectPoisonGeneratingInstrsInBackwardSlice(
1235                 cast<VPRecipeBase>(AddrDef));
1236         }
1237       }
1238     }
1239   }
1240 }
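// A sketch of the situation this handles (source assumed for illustration):
//   for (i = 0; i < n; ++i)
//     if (c[i])
//       x += a[i];   // consecutive load, widened under a mask
// The address of a[i] is typically computed by a GEP carrying the 'inbounds'
// flag. Once the load is widened and executed unconditionally behind a mask,
// lanes that were inactive in the original loop may evaluate that GEP, so the
// recipe is recorded here and its poison-generating flags can later be
// dropped to keep the widened access well-defined.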
1241 
1242 void InnerLoopVectorizer::addMetadata(Instruction *To,
1243                                       Instruction *From) {
1244   propagateMetadata(To, From);
1245   addNewMetadata(To, From);
1246 }
1247 
1248 void InnerLoopVectorizer::addMetadata(ArrayRef<Value *> To,
1249                                       Instruction *From) {
1250   for (Value *V : To) {
1251     if (Instruction *I = dyn_cast<Instruction>(V))
1252       addMetadata(I, From);
1253   }
1254 }
1255 
1256 namespace llvm {
1257 
1258 // Loop vectorization cost-model hints how the scalar epilogue loop should be
1259 // lowered.
1260 enum ScalarEpilogueLowering {
1261 
1262   // The default: allowing scalar epilogues.
1263   CM_ScalarEpilogueAllowed,
1264 
1265   // Vectorization with OptForSize: don't allow epilogues.
1266   CM_ScalarEpilogueNotAllowedOptSize,
1267 
1268   // A special case of vectorization with OptForSize: loops with a very small
1269   // trip count are considered for vectorization under OptForSize, thereby
1270   // making sure the cost of their loop body is dominant, free of runtime
1271   // guards and scalar iteration overheads.
1272   CM_ScalarEpilogueNotAllowedLowTripLoop,
1273 
1274   // Loop hint predicate indicating an epilogue is undesired.
1275   CM_ScalarEpilogueNotNeededUsePredicate,
1276 
1277   // Directive indicating we must either tail fold or not vectorize.
1278   CM_ScalarEpilogueNotAllowedUsePredicate
1279 };
1280 
1281 /// ElementCountComparator creates a total ordering for ElementCount
1282 /// for the purposes of using it in a set structure.
1283 struct ElementCountComparator {
1284   bool operator()(const ElementCount &LHS, const ElementCount &RHS) const {
1285     return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
1286            std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
1287   }
1288 };
1289 using ElementCountSet = SmallSet<ElementCount, 16, ElementCountComparator>;
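// Under this ordering all fixed factors sort before all scalable ones; for
// example (illustrative), a set built from {8, vscale x 2, 4} iterates as
//   4, 8, vscale x 2.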
1290 
1291 /// LoopVectorizationCostModel - estimates the expected speedups due to
1292 /// vectorization.
1293 /// In many cases vectorization is not profitable. This can happen because of
1294 /// a number of reasons. In this class we mainly attempt to predict the
1295 /// expected speedup/slowdowns due to the supported instruction set. We use the
1296 /// TargetTransformInfo to query the different backends for the cost of
1297 /// different operations.
1298 class LoopVectorizationCostModel {
1299 public:
1300   LoopVectorizationCostModel(ScalarEpilogueLowering SEL, Loop *L,
1301                              PredicatedScalarEvolution &PSE, LoopInfo *LI,
1302                              LoopVectorizationLegality *Legal,
1303                              const TargetTransformInfo &TTI,
1304                              const TargetLibraryInfo *TLI, DemandedBits *DB,
1305                              AssumptionCache *AC,
1306                              OptimizationRemarkEmitter *ORE, const Function *F,
1307                              const LoopVectorizeHints *Hints,
1308                              InterleavedAccessInfo &IAI)
1309       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
1310         TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
1311         Hints(Hints), InterleaveInfo(IAI) {}
1312 
1313   /// \return An upper bound for the vectorization factors (both fixed and
1314   /// scalable). If the factors are 0, vectorization and interleaving should be
1315   /// avoided up front.
1316   FixedScalableVFPair computeMaxVF(ElementCount UserVF, unsigned UserIC);
1317 
1318   /// \return True if runtime checks are required for vectorization, and false
1319   /// otherwise.
1320   bool runtimeChecksRequired();
1321 
1322   /// \return The most profitable vectorization factor and the cost of that VF.
1323   /// This method checks every VF in \p CandidateVFs. If UserVF is not ZERO
1324   /// then this vectorization factor will be selected if vectorization is
1325   /// possible.
1326   VectorizationFactor
1327   selectVectorizationFactor(const ElementCountSet &CandidateVFs);
1328 
1329   VectorizationFactor
1330   selectEpilogueVectorizationFactor(const ElementCount MaxVF,
1331                                     const LoopVectorizationPlanner &LVP);
1332 
1333   /// Setup cost-based decisions for user vectorization factor.
1334   /// \return true if the UserVF is a feasible VF to be chosen.
1335   bool selectUserVectorizationFactor(ElementCount UserVF) {
1336     collectUniformsAndScalars(UserVF);
1337     collectInstsToScalarize(UserVF);
1338     return expectedCost(UserVF).first.isValid();
1339   }
1340 
1341   /// \return The size (in bits) of the smallest and widest types in the code
1342   /// that needs to be vectorized. We ignore values that remain scalar such as
1343   /// 64 bit loop indices.
1344   std::pair<unsigned, unsigned> getSmallestAndWidestTypes();
1345 
1346   /// \return The desired interleave count.
1347   /// If interleave count has been specified by metadata it will be returned.
1348   /// Otherwise, the interleave count is computed and returned. VF and LoopCost
1349   /// are the selected vectorization factor and the cost of the selected VF.
1350   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
1351 
1352   /// A memory access instruction may be vectorized in more than one way.
1353   /// The form of the instruction after vectorization depends on cost.
1354   /// This function takes cost-based decisions for Load/Store instructions
1355   /// and collects them in a map. This decision map is used for building
1356   /// the lists of loop-uniform and loop-scalar instructions.
1357   /// The calculated cost is saved with the widening decision in order to
1358   /// avoid redundant calculations.
1359   void setCostBasedWideningDecision(ElementCount VF);
1360 
1361   /// A struct that represents some properties of the register usage
1362   /// of a loop.
1363   struct RegisterUsage {
1364     /// Holds the number of loop invariant values that are used in the loop.
1365     /// The key is ClassID of target-provided register class.
1366     SmallMapVector<unsigned, unsigned, 4> LoopInvariantRegs;
1367     /// Holds the maximum number of concurrent live intervals in the loop.
1368     /// The key is ClassID of target-provided register class.
1369     SmallMapVector<unsigned, unsigned, 4> MaxLocalUsers;
1370   };
1371 
1372   /// \return Returns information about the register usages of the loop for the
1373   /// given vectorization factors.
1374   SmallVector<RegisterUsage, 8>
1375   calculateRegisterUsage(ArrayRef<ElementCount> VFs);
1376 
1377   /// Collect values we want to ignore in the cost model.
1378   void collectValuesToIgnore();
1379 
1380   /// Collect all element types in the loop for which widening is needed.
1381   void collectElementTypesForWidening();
1382 
1383   /// Split reductions into those that happen in the loop, and those that happen
1384   /// outside. In-loop reductions are collected into InLoopReductionChains.
1385   void collectInLoopReductions();
1386 
1387   /// Returns true if we should use strict in-order reductions for the given
1388   /// RdxDesc. This is true if the -enable-strict-reductions flag is passed,
1389   /// the IsOrdered flag of RdxDesc is set and we do not allow reordering
1390   /// of FP operations.
1391   bool useOrderedReductions(const RecurrenceDescriptor &RdxDesc) {
1392     return !Hints->allowReordering() && RdxDesc.isOrdered();
1393   }
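  // For example (illustrative): a floating-point reduction such as
  //   for (i = 0; i < n; ++i) sum += a[i];
  // compiled without reassociation (no fast-math) must preserve the original
  // order of the fadds, so it is handled as an ordered (strict, in-order)
  // reduction rather than a reassociated vector reduction.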
1394 
1395   /// \returns The smallest bitwidth each instruction can be represented with.
1396   /// The vector equivalents of these instructions should be truncated to this
1397   /// type.
1398   const MapVector<Instruction *, uint64_t> &getMinimalBitwidths() const {
1399     return MinBWs;
1400   }
1401 
1402   /// \returns True if it is more profitable to scalarize instruction \p I for
1403   /// vectorization factor \p VF.
1404   bool isProfitableToScalarize(Instruction *I, ElementCount VF) const {
1405     assert(VF.isVector() &&
1406            "Profitable to scalarize relevant only for VF > 1.");
1407 
1408     // Cost model is not run in the VPlan-native path - return conservative
1409     // result until this changes.
1410     if (EnableVPlanNativePath)
1411       return false;
1412 
1413     auto Scalars = InstsToScalarize.find(VF);
1414     assert(Scalars != InstsToScalarize.end() &&
1415            "VF not yet analyzed for scalarization profitability");
1416     return Scalars->second.find(I) != Scalars->second.end();
1417   }
1418 
1419   /// Returns true if \p I is known to be uniform after vectorization.
1420   bool isUniformAfterVectorization(Instruction *I, ElementCount VF) const {
1421     if (VF.isScalar())
1422       return true;
1423 
1424     // Cost model is not run in the VPlan-native path - return conservative
1425     // result until this changes.
1426     if (EnableVPlanNativePath)
1427       return false;
1428 
1429     auto UniformsPerVF = Uniforms.find(VF);
1430     assert(UniformsPerVF != Uniforms.end() &&
1431            "VF not yet analyzed for uniformity");
1432     return UniformsPerVF->second.count(I);
1433   }
1434 
1435   /// Returns true if \p I is known to be scalar after vectorization.
1436   bool isScalarAfterVectorization(Instruction *I, ElementCount VF) const {
1437     if (VF.isScalar())
1438       return true;
1439 
1440     // Cost model is not run in the VPlan-native path - return conservative
1441     // result until this changes.
1442     if (EnableVPlanNativePath)
1443       return false;
1444 
1445     auto ScalarsPerVF = Scalars.find(VF);
1446     assert(ScalarsPerVF != Scalars.end() &&
1447            "Scalar values are not calculated for VF");
1448     return ScalarsPerVF->second.count(I);
1449   }
1450 
1451   /// \returns True if instruction \p I can be truncated to a smaller bitwidth
1452   /// for vectorization factor \p VF.
1453   bool canTruncateToMinimalBitwidth(Instruction *I, ElementCount VF) const {
1454     return VF.isVector() && MinBWs.find(I) != MinBWs.end() &&
1455            !isProfitableToScalarize(I, VF) &&
1456            !isScalarAfterVectorization(I, VF);
1457   }
1458 
1459   /// Decision that was taken during cost calculation for memory instruction.
1460   enum InstWidening {
1461     CM_Unknown,
1462     CM_Widen,         // For consecutive accesses with stride +1.
1463     CM_Widen_Reverse, // For consecutive accesses with stride -1.
1464     CM_Interleave,
1465     CM_GatherScatter,
1466     CM_Scalarize
1467   };
1468 
1469   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1470   /// instruction \p I and vector width \p VF.
1471   void setWideningDecision(Instruction *I, ElementCount VF, InstWidening W,
1472                            InstructionCost Cost) {
1473     assert(VF.isVector() && "Expected VF >=2");
1474     WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1475   }
1476 
1477   /// Save vectorization decision \p W and \p Cost taken by the cost model for
1478   /// interleaving group \p Grp and vector width \p VF.
1479   void setWideningDecision(const InterleaveGroup<Instruction> *Grp,
1480                            ElementCount VF, InstWidening W,
1481                            InstructionCost Cost) {
1482     assert(VF.isVector() && "Expected VF >=2");
1483     // Broadcast this decision to all instructions inside the group,
1484     // but the cost will be assigned to one instruction only.
1485     for (unsigned i = 0; i < Grp->getFactor(); ++i) {
1486       if (auto *I = Grp->getMember(i)) {
1487         if (Grp->getInsertPos() == I)
1488           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, Cost);
1489         else
1490           WideningDecisions[std::make_pair(I, VF)] = std::make_pair(W, 0);
1491       }
1492     }
1493   }
1494 
1495   /// Return the cost model decision for the given instruction \p I and vector
1496   /// width \p VF. Return CM_Unknown if this instruction did not pass
1497   /// through the cost modeling.
1498   InstWidening getWideningDecision(Instruction *I, ElementCount VF) const {
1499     assert(VF.isVector() && "Expected VF to be a vector VF");
1500     // Cost model is not run in the VPlan-native path - return conservative
1501     // result until this changes.
1502     if (EnableVPlanNativePath)
1503       return CM_GatherScatter;
1504 
1505     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1506     auto Itr = WideningDecisions.find(InstOnVF);
1507     if (Itr == WideningDecisions.end())
1508       return CM_Unknown;
1509     return Itr->second.first;
1510   }
1511 
1512   /// Return the vectorization cost for the given instruction \p I and vector
1513   /// width \p VF.
1514   InstructionCost getWideningCost(Instruction *I, ElementCount VF) {
1515     assert(VF.isVector() && "Expected VF >=2");
1516     std::pair<Instruction *, ElementCount> InstOnVF = std::make_pair(I, VF);
1517     assert(WideningDecisions.find(InstOnVF) != WideningDecisions.end() &&
1518            "The cost is not calculated");
1519     return WideningDecisions[InstOnVF].second;
1520   }
1521 
1522   /// Return True if instruction \p I is an optimizable truncate whose operand
1523   /// is an induction variable. Such a truncate will be removed by adding a new
1524   /// induction variable with the destination type.
1525   bool isOptimizableIVTruncate(Instruction *I, ElementCount VF) {
1526     // If the instruction is not a truncate, return false.
1527     auto *Trunc = dyn_cast<TruncInst>(I);
1528     if (!Trunc)
1529       return false;
1530 
1531     // Get the source and destination types of the truncate.
1532     Type *SrcTy = ToVectorTy(cast<CastInst>(I)->getSrcTy(), VF);
1533     Type *DestTy = ToVectorTy(cast<CastInst>(I)->getDestTy(), VF);
1534 
1535     // If the truncate is free for the given types, return false. Replacing a
1536     // free truncate with an induction variable would add an induction variable
1537     // update instruction to each iteration of the loop. We exclude from this
1538     // check the primary induction variable since it will need an update
1539     // instruction regardless.
1540     Value *Op = Trunc->getOperand(0);
1541     if (Op != Legal->getPrimaryInduction() && TTI.isTruncateFree(SrcTy, DestTy))
1542       return false;
1543 
1544     // If the truncated value is not an induction variable, return false.
1545     return Legal->isInductionPhi(Op);
1546   }
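  // For example (illustrative IR): with an i64 primary induction %iv, a cast
  //   %t = trunc i64 %iv to i32
  // used only as an i32 counter can be removed by introducing a separate i32
  // induction variable, instead of widening %iv and truncating it on every
  // iteration.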
1547 
1548   /// Collects the instructions to scalarize for each predicated instruction in
1549   /// the loop.
1550   void collectInstsToScalarize(ElementCount VF);
1551 
1552   /// Collect Uniform and Scalar values for the given \p VF.
1553   /// The sets depend on CM decision for Load/Store instructions
1554   /// that may be vectorized as interleave, gather-scatter or scalarized.
1555   void collectUniformsAndScalars(ElementCount VF) {
1556     // Do the analysis once.
1557     if (VF.isScalar() || Uniforms.find(VF) != Uniforms.end())
1558       return;
1559     setCostBasedWideningDecision(VF);
1560     collectLoopUniforms(VF);
1561     collectLoopScalars(VF);
1562   }
1563 
1564   /// Returns true if the target machine supports masked store operation
1565   /// for the given \p DataType and kind of access to \p Ptr.
1566   bool isLegalMaskedStore(Type *DataType, Value *Ptr, Align Alignment) const {
1567     return Legal->isConsecutivePtr(DataType, Ptr) &&
1568            TTI.isLegalMaskedStore(DataType, Alignment);
1569   }
1570 
1571   /// Returns true if the target machine supports masked load operation
1572   /// for the given \p DataType and kind of access to \p Ptr.
1573   bool isLegalMaskedLoad(Type *DataType, Value *Ptr, Align Alignment) const {
1574     return Legal->isConsecutivePtr(DataType, Ptr) &&
1575            TTI.isLegalMaskedLoad(DataType, Alignment);
1576   }
1577 
1578   /// Returns true if the target machine can represent \p V as a masked gather
1579   /// or scatter operation.
1580   bool isLegalGatherOrScatter(Value *V) {
1581     bool LI = isa<LoadInst>(V);
1582     bool SI = isa<StoreInst>(V);
1583     if (!LI && !SI)
1584       return false;
1585     auto *Ty = getLoadStoreType(V);
1586     Align Align = getLoadStoreAlignment(V);
1587     return (LI && TTI.isLegalMaskedGather(Ty, Align)) ||
1588            (SI && TTI.isLegalMaskedScatter(Ty, Align));
1589   }
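  // For example (illustrative): an indirect access such as a[b[i]] has no
  // consecutive pointer, so it can typically only be widened if the target
  // supports masked gather (loads) or masked scatter (stores) for this type
  // and alignment; otherwise it is scalarized.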
1590 
1591   /// Returns true if the target machine supports all of the reduction
1592   /// variables found for the given VF.
1593   bool canVectorizeReductions(ElementCount VF) const {
1594     return (all_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
1595       const RecurrenceDescriptor &RdxDesc = Reduction.second;
1596       return TTI.isLegalToVectorizeReduction(RdxDesc, VF);
1597     }));
1598   }
1599 
1600   /// Returns true if \p I is an instruction that will be scalarized with
1601   /// predication. Such instructions include conditional stores and
1602   /// instructions that may divide by zero.
1603   /// If a non-zero VF has been calculated, we check if I will be scalarized
1604   /// with predication for that VF.
1605   bool isScalarWithPredication(Instruction *I) const;
1606 
1607   // Returns true if \p I is an instruction that will be predicated either
1608   // through scalar predication or masked load/store or masked gather/scatter.
1609   // Superset of instructions that return true for isScalarWithPredication.
1610   bool isPredicatedInst(Instruction *I, bool IsKnownUniform = false) {
1611     // When we know the load is uniform and the original scalar loop was not
1612     // predicated we don't need to mark it as a predicated instruction. Any
1613     // vectorized blocks created when tail-folding are artificial blocks we
1614     // have introduced, and we know there is always at least one active lane.
1615     // That's why we call Legal->blockNeedsPredication here because it doesn't
1616     // query tail-folding.
1617     if (IsKnownUniform && isa<LoadInst>(I) &&
1618         !Legal->blockNeedsPredication(I->getParent()))
1619       return false;
1620     if (!blockNeedsPredicationForAnyReason(I->getParent()))
1621       return false;
1622     // Loads and stores that need some form of masked operation are predicated
1623     // instructions.
1624     if (isa<LoadInst>(I) || isa<StoreInst>(I))
1625       return Legal->isMaskRequired(I);
1626     return isScalarWithPredication(I);
1627   }
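  // For example (illustrative): in
  //   for (i = 0; i < n; ++i)
  //     if (c[i]) a[i] = x;
  // the store executes conditionally, so after vectorization it must either
  // become a masked store or be scalarized and predicated; in both cases it
  // is reported as a predicated instruction here.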
1628 
1629   /// Returns true if \p I is a memory instruction with consecutive memory
1630   /// access that can be widened.
1631   bool
1632   memoryInstructionCanBeWidened(Instruction *I,
1633                                 ElementCount VF = ElementCount::getFixed(1));
1634 
1635   /// Returns true if \p I is a memory instruction in an interleaved-group
1636   /// of memory accesses that can be vectorized with wide vector loads/stores
1637   /// and shuffles.
1638   bool
1639   interleavedAccessCanBeWidened(Instruction *I,
1640                                 ElementCount VF = ElementCount::getFixed(1));
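  // For example (illustrative): the pair of accesses
  //   ... = a[2*i]; ... = a[2*i+1];
  // forms an interleave group of factor 2 that can be widened into a single
  // wide load followed by shuffles that de-interleave the even and odd
  // elements.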
1641 
1642   /// Check if \p Instr belongs to any interleaved access group.
1643   bool isAccessInterleaved(Instruction *Instr) {
1644     return InterleaveInfo.isInterleaved(Instr);
1645   }
1646 
1647   /// Get the interleaved access group that \p Instr belongs to.
1648   const InterleaveGroup<Instruction> *
1649   getInterleavedAccessGroup(Instruction *Instr) {
1650     return InterleaveInfo.getInterleaveGroup(Instr);
1651   }
1652 
1653   /// Returns true if we're required to use a scalar epilogue for at least
1654   /// the final iteration of the original loop.
1655   bool requiresScalarEpilogue(ElementCount VF) const {
1656     if (!isScalarEpilogueAllowed())
1657       return false;
1658     // If we might exit from anywhere but the latch, must run the exiting
1659     // iteration in scalar form.
1660     if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch())
1661       return true;
1662     return VF.isVector() && InterleaveInfo.requiresScalarEpilogue();
1663   }
1664 
1665   /// Returns true if a scalar epilogue is not allowed due to optsize or a
1666   /// loop hint annotation.
1667   bool isScalarEpilogueAllowed() const {
1668     return ScalarEpilogueStatus == CM_ScalarEpilogueAllowed;
1669   }
1670 
1671   /// Returns true if all loop blocks should be masked to fold tail loop.
1672   bool foldTailByMasking() const { return FoldTailByMasking; }
1673 
1674   /// Returns true if the instructions in this block requires predication
1675   /// for any reason, e.g. because tail folding now requires a predicate
1676   /// or because the block in the original loop was predicated.
1677   bool blockNeedsPredicationForAnyReason(BasicBlock *BB) const {
1678     return foldTailByMasking() || Legal->blockNeedsPredication(BB);
1679   }
1680 
1681   /// A SmallMapVector to store the InLoop reduction op chains, mapping phi
1682   /// nodes to the chain of instructions representing the reductions. Uses a
1683   /// MapVector to ensure deterministic iteration order.
1684   using ReductionChainMap =
1685       SmallMapVector<PHINode *, SmallVector<Instruction *, 4>, 4>;
1686 
1687   /// Return the chain of instructions representing an inloop reduction.
1688   const ReductionChainMap &getInLoopReductionChains() const {
1689     return InLoopReductionChains;
1690   }
1691 
1692   /// Returns true if the Phi is part of an inloop reduction.
1693   bool isInLoopReduction(PHINode *Phi) const {
1694     return InLoopReductionChains.count(Phi);
1695   }
1696 
1697   /// Estimate cost of an intrinsic call instruction CI if it were vectorized
1698   /// with factor VF.  Return the cost of the instruction, including
1699   /// scalarization overhead if it's needed.
1700   InstructionCost getVectorIntrinsicCost(CallInst *CI, ElementCount VF) const;
1701 
1702   /// Estimate cost of a call instruction CI if it were vectorized with factor
1703   /// VF. Return the cost of the instruction, including scalarization overhead
1704   /// if it's needed. The flag NeedToScalarize shows if the call needs to be
1705   /// scalarized -
1706   /// i.e. either vector version isn't available, or is too expensive.
1707   InstructionCost getVectorCallCost(CallInst *CI, ElementCount VF,
1708                                     bool &NeedToScalarize) const;
1709 
1710   /// Returns true if the per-lane cost of VectorizationFactor A is lower than
1711   /// that of B.
1712   bool isMoreProfitable(const VectorizationFactor &A,
1713                         const VectorizationFactor &B) const;
1714 
1715   /// Invalidates decisions already taken by the cost model.
1716   void invalidateCostModelingDecisions() {
1717     WideningDecisions.clear();
1718     Uniforms.clear();
1719     Scalars.clear();
1720   }
1721 
1722 private:
1723   unsigned NumPredStores = 0;
1724 
1725   /// \return An upper bound for the vectorization factors for both
1726   /// fixed and scalable vectorization, where the minimum-known number of
1727   /// elements is a power-of-2 larger than zero. If scalable vectorization is
1728   /// disabled or unsupported, then the scalable part will be equal to
1729   /// ElementCount::getScalable(0).
1730   FixedScalableVFPair computeFeasibleMaxVF(unsigned ConstTripCount,
1731                                            ElementCount UserVF);
1732 
1733   /// \return the maximized element count based on the target's vector
1734   /// registers and the loop trip-count, but limited to a maximum safe VF.
1735   /// This is a helper function of computeFeasibleMaxVF.
1736   /// FIXME: MaxSafeVF is currently passed by reference to avoid some obscure
1737   /// issue that occurred on one of the buildbots which cannot be reproduced
1738   /// without having access to the proprietary compiler (see comments on
1739   /// D98509). The issue is currently under investigation and this workaround
1740   /// will be removed as soon as possible.
1741   ElementCount getMaximizedVFForTarget(unsigned ConstTripCount,
1742                                        unsigned SmallestType,
1743                                        unsigned WidestType,
1744                                        const ElementCount &MaxSafeVF);
1745 
1746   /// \return the maximum legal scalable VF, based on the safe max number
1747   /// of elements.
1748   ElementCount getMaxLegalScalableVF(unsigned MaxSafeElements);
1749 
1750   /// The vectorization cost is a combination of the cost itself and a boolean
1751   /// indicating whether any of the contributing operations will actually
1752   /// operate on vector values after type legalization in the backend. If this
1753   /// latter value is false, then all operations will be scalarized (i.e. no
1754   /// vectorization has actually taken place).
1755   using VectorizationCostTy = std::pair<InstructionCost, bool>;
1756 
1757   /// Returns the expected execution cost. The unit of the cost does
1758   /// not matter because we use the 'cost' units to compare different
1759   /// vector widths. The cost that is returned is *not* normalized by
1760   /// the factor width. If \p Invalid is not nullptr, this function
1761   /// will add a pair(Instruction*, ElementCount) to \p Invalid for
1762   /// each instruction that has an Invalid cost for the given VF.
1763   using InstructionVFPair = std::pair<Instruction *, ElementCount>;
1764   VectorizationCostTy
1765   expectedCost(ElementCount VF,
1766                SmallVectorImpl<InstructionVFPair> *Invalid = nullptr);
1767 
1768   /// Returns the execution time cost of an instruction for a given vector
1769   /// width. Vector width of one means scalar.
1770   VectorizationCostTy getInstructionCost(Instruction *I, ElementCount VF);
1771 
1772   /// The cost-computation logic from getInstructionCost which provides
1773   /// the vector type as an output parameter.
1774   InstructionCost getInstructionCost(Instruction *I, ElementCount VF,
1775                                      Type *&VectorTy);
1776 
1777   /// Return the cost of instructions in an inloop reduction pattern, if I is
1778   /// part of that pattern.
1779   Optional<InstructionCost>
1780   getReductionPatternCost(Instruction *I, ElementCount VF, Type *VectorTy,
1781                           TTI::TargetCostKind CostKind);
1782 
1783   /// Calculate vectorization cost of memory instruction \p I.
1784   InstructionCost getMemoryInstructionCost(Instruction *I, ElementCount VF);
1785 
1786   /// The cost computation for scalarized memory instruction.
1787   InstructionCost getMemInstScalarizationCost(Instruction *I, ElementCount VF);
1788 
1789   /// The cost computation for interleaving group of memory instructions.
1790   InstructionCost getInterleaveGroupCost(Instruction *I, ElementCount VF);
1791 
1792   /// The cost computation for Gather/Scatter instruction.
1793   InstructionCost getGatherScatterCost(Instruction *I, ElementCount VF);
1794 
1795   /// The cost computation for widening instruction \p I with consecutive
1796   /// memory access.
1797   InstructionCost getConsecutiveMemOpCost(Instruction *I, ElementCount VF);
1798 
1799   /// The cost calculation for Load/Store instruction \p I with uniform pointer -
1800   /// Load: scalar load + broadcast.
1801   /// Store: scalar store + (loop invariant value stored? 0 : extract of last
1802   /// element)
1803   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
1804 
1805   /// Estimate the overhead of scalarizing an instruction. This is a
1806   /// convenience wrapper for the type-based getScalarizationOverhead API.
1807   InstructionCost getScalarizationOverhead(Instruction *I,
1808                                            ElementCount VF) const;
1809 
1810   /// Returns whether the instruction is a load or store and will be emitted
1811   /// as a vector operation.
1812   bool isConsecutiveLoadOrStore(Instruction *I);
1813 
1814   /// Returns true if an artificially high cost for emulated masked memrefs
1815   /// should be used.
1816   bool useEmulatedMaskMemRefHack(Instruction *I);
1817 
1818   /// Map of scalar integer values to the smallest bitwidth they can be legally
1819   /// represented as. The vector equivalents of these values should be truncated
1820   /// to this type.
1821   MapVector<Instruction *, uint64_t> MinBWs;
1822 
1823   /// A type representing the costs for instructions if they were to be
1824   /// scalarized rather than vectorized. The entries are Instruction-Cost
1825   /// pairs.
1826   using ScalarCostsTy = DenseMap<Instruction *, InstructionCost>;
1827 
1828   /// A set containing all BasicBlocks that are known to be present after
1829   /// vectorization as predicated blocks.
1830   SmallPtrSet<BasicBlock *, 4> PredicatedBBsAfterVectorization;
1831 
1832   /// Records whether it is allowed to have the original scalar loop execute at
1833   /// least once. This may be needed as a fallback loop in case runtime
1834   /// aliasing/dependence checks fail, or to handle the tail/remainder
1835   /// iterations when the trip count is unknown or not divisible by the VF,
1836   /// or as a peel-loop to handle gaps in interleave-groups.
1837   /// Under optsize and when the trip count is very small we don't allow any
1838   /// iterations to execute in the scalar loop.
1839   ScalarEpilogueLowering ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
1840 
1841   /// All blocks of loop are to be masked to fold tail of scalar iterations.
1842   bool FoldTailByMasking = false;
1843 
1844   /// A map holding scalar costs for different vectorization factors. The
1845   /// presence of a cost for an instruction in the mapping indicates that the
1846   /// instruction will be scalarized when vectorizing with the associated
1847   /// vectorization factor. The entries are VF-ScalarCostTy pairs.
1848   DenseMap<ElementCount, ScalarCostsTy> InstsToScalarize;
1849 
1850   /// Holds the instructions known to be uniform after vectorization.
1851   /// The data is collected per VF.
1852   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Uniforms;
1853 
1854   /// Holds the instructions known to be scalar after vectorization.
1855   /// The data is collected per VF.
1856   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> Scalars;
1857 
1858   /// Holds the instructions (address computations) that are forced to be
1859   /// scalarized.
1860   DenseMap<ElementCount, SmallPtrSet<Instruction *, 4>> ForcedScalars;
1861 
1862   /// PHINodes of the reductions that should be expanded in-loop along with
1863   /// their associated chains of reduction operations, in program order from top
1864   /// (PHI) to bottom.
1865   ReductionChainMap InLoopReductionChains;
1866 
1867   /// A Map of inloop reduction operations and their immediate chain operand.
1868   /// FIXME: This can be removed once reductions can be costed correctly in
1869   /// vplan. This was added to allow quick lookup to the inloop operations,
1870   /// without having to loop through InLoopReductionChains.
1871   DenseMap<Instruction *, Instruction *> InLoopReductionImmediateChains;
1872 
1873   /// Returns the expected difference in cost from scalarizing the expression
1874   /// feeding a predicated instruction \p PredInst. The instructions to
1875   /// scalarize and their scalar costs are collected in \p ScalarCosts. A
1876   /// non-negative return value implies the expression will be scalarized.
1877   /// Currently, only single-use chains are considered for scalarization.
1878   int computePredInstDiscount(Instruction *PredInst, ScalarCostsTy &ScalarCosts,
1879                               ElementCount VF);
1880 
1881   /// Collect the instructions that are uniform after vectorization. An
1882   /// instruction is uniform if we represent it with a single scalar value in
1883   /// the vectorized loop corresponding to each vector iteration. Examples of
1884   /// uniform instructions include pointer operands of consecutive or
1885   /// interleaved memory accesses. Note that although uniformity implies an
1886   /// instruction will be scalar, the reverse is not true. In general, a
1887   /// scalarized instruction will be represented by VF scalar values in the
1888   /// vectorized loop, each corresponding to an iteration of the original
1889   /// scalar loop.
1890   void collectLoopUniforms(ElementCount VF);
1891 
1892   /// Collect the instructions that are scalar after vectorization. An
1893   /// instruction is scalar if it is known to be uniform or will be scalarized
1894   /// during vectorization. collectLoopScalars should only add non-uniform nodes
1895   /// to the list if they are used by a load/store instruction that is marked as
1896   /// CM_Scalarize. Non-uniform scalarized instructions will be represented by
1897   /// VF values in the vectorized loop, each corresponding to an iteration of
1898   /// the original scalar loop.
1899   void collectLoopScalars(ElementCount VF);
1900 
1901   /// Keeps cost model vectorization decision and cost for instructions.
1902   /// Right now it is used for memory instructions only.
1903   using DecisionList = DenseMap<std::pair<Instruction *, ElementCount>,
1904                                 std::pair<InstWidening, InstructionCost>>;
1905 
1906   DecisionList WideningDecisions;
1907 
1908   /// Returns true if \p V is expected to be vectorized and it needs to be
1909   /// extracted.
1910   bool needsExtract(Value *V, ElementCount VF) const {
1911     Instruction *I = dyn_cast<Instruction>(V);
1912     if (VF.isScalar() || !I || !TheLoop->contains(I) ||
1913         TheLoop->isLoopInvariant(I))
1914       return false;
1915 
1916     // Assume we can vectorize V (and hence we need extraction) if the
1917     // scalars are not computed yet. This can happen, because it is called
1918     // via getScalarizationOverhead from setCostBasedWideningDecision, before
1919     // the scalars are collected. That should be a safe assumption in most
1920     // cases, because we check if the operands have vectorizable types
1921     // beforehand in LoopVectorizationLegality.
1922     return Scalars.find(VF) == Scalars.end() ||
1923            !isScalarAfterVectorization(I, VF);
1924   };
1925 
1926   /// Returns a range containing only operands needing to be extracted.
1927   SmallVector<Value *, 4> filterExtractingOperands(Instruction::op_range Ops,
1928                                                    ElementCount VF) const {
1929     return SmallVector<Value *, 4>(make_filter_range(
1930         Ops, [this, VF](Value *V) { return this->needsExtract(V, VF); }));
1931   }
1932 
1933   /// Determines if we have the infrastructure to vectorize loop \p L and its
1934   /// epilogue, assuming the main loop is vectorized by \p VF.
1935   bool isCandidateForEpilogueVectorization(const Loop &L,
1936                                            const ElementCount VF) const;
1937 
1938   /// Returns true if epilogue vectorization is considered profitable, and
1939   /// false otherwise.
1940   /// \p VF is the vectorization factor chosen for the original loop.
1941   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
1942 
1943 public:
1944   /// The loop that we evaluate.
1945   Loop *TheLoop;
1946 
1947   /// Predicated scalar evolution analysis.
1948   PredicatedScalarEvolution &PSE;
1949 
1950   /// Loop Info analysis.
1951   LoopInfo *LI;
1952 
1953   /// Vectorization legality.
1954   LoopVectorizationLegality *Legal;
1955 
1956   /// Vector target information.
1957   const TargetTransformInfo &TTI;
1958 
1959   /// Target Library Info.
1960   const TargetLibraryInfo *TLI;
1961 
1962   /// Demanded bits analysis.
1963   DemandedBits *DB;
1964 
1965   /// Assumption cache.
1966   AssumptionCache *AC;
1967 
1968   /// Interface to emit optimization remarks.
1969   OptimizationRemarkEmitter *ORE;
1970 
1971   const Function *TheFunction;
1972 
1973   /// Loop Vectorize Hint.
1974   const LoopVectorizeHints *Hints;
1975 
1976   /// The interleave access information contains groups of interleaved accesses
1977   /// with the same stride and close to each other.
1978   InterleavedAccessInfo &InterleaveInfo;
1979 
1980   /// Values to ignore in the cost model.
1981   SmallPtrSet<const Value *, 16> ValuesToIgnore;
1982 
1983   /// Values to ignore in the cost model when VF > 1.
1984   SmallPtrSet<const Value *, 16> VecValuesToIgnore;
1985 
1986   /// All element types found in the loop.
1987   SmallPtrSet<Type *, 16> ElementTypesInLoop;
1988 
1989   /// Profitable vector factors.
1990   SmallVector<VectorizationFactor, 8> ProfitableVFs;
1991 };
1992 } // end namespace llvm
1993 
1994 /// Helper struct to manage generating runtime checks for vectorization.
1995 ///
1996 /// The runtime checks are created up-front in temporary blocks to allow better
1997 /// estimating the cost and un-linked from the existing IR. After deciding to
1998 /// vectorize, the checks are moved back. If deciding not to vectorize, the
1999 /// temporary blocks are completely removed.
2000 class GeneratedRTChecks {
2001   /// Basic block which contains the generated SCEV checks, if any.
2002   BasicBlock *SCEVCheckBlock = nullptr;
2003 
2004   /// The value representing the result of the generated SCEV checks. If it is
2005   /// nullptr, either no SCEV checks have been generated or they have been used.
2006   Value *SCEVCheckCond = nullptr;
2007 
2008   /// Basic block which contains the generated memory runtime checks, if any.
2009   BasicBlock *MemCheckBlock = nullptr;
2010 
2011   /// The value representing the result of the generated memory runtime checks.
2012   /// If it is nullptr, either no memory runtime checks have been generated or
2013   /// they have been used.
2014   Value *MemRuntimeCheckCond = nullptr;
2015 
2016   DominatorTree *DT;
2017   LoopInfo *LI;
2018 
2019   SCEVExpander SCEVExp;
2020   SCEVExpander MemCheckExp;
2021 
2022 public:
2023   GeneratedRTChecks(ScalarEvolution &SE, DominatorTree *DT, LoopInfo *LI,
2024                     const DataLayout &DL)
2025       : DT(DT), LI(LI), SCEVExp(SE, DL, "scev.check"),
2026         MemCheckExp(SE, DL, "scev.check") {}
2027 
2028   /// Generate runtime checks in SCEVCheckBlock and MemCheckBlock, so we can
2029   /// accurately estimate the cost of the runtime checks. The blocks are
2030   /// un-linked from the IR and are added back during vector code generation. If
2031   /// there is no vector code generation, the check blocks are removed
2032   /// completely.
2033   void Create(Loop *L, const LoopAccessInfo &LAI,
2034               const SCEVUnionPredicate &UnionPred) {
2035 
2036     BasicBlock *LoopHeader = L->getHeader();
2037     BasicBlock *Preheader = L->getLoopPreheader();
2038 
2039     // Use SplitBlock to create blocks for SCEV & memory runtime checks to
2040     // ensure the blocks are properly added to LoopInfo & DominatorTree. Those
2041     // may be used by SCEVExpander. The blocks will be un-linked from their
2042     // predecessors and removed from LI & DT at the end of the function.
2043     if (!UnionPred.isAlwaysTrue()) {
2044       SCEVCheckBlock = SplitBlock(Preheader, Preheader->getTerminator(), DT, LI,
2045                                   nullptr, "vector.scevcheck");
2046 
2047       SCEVCheckCond = SCEVExp.expandCodeForPredicate(
2048           &UnionPred, SCEVCheckBlock->getTerminator());
2049     }
2050 
2051     const auto &RtPtrChecking = *LAI.getRuntimePointerChecking();
2052     if (RtPtrChecking.Need) {
2053       auto *Pred = SCEVCheckBlock ? SCEVCheckBlock : Preheader;
2054       MemCheckBlock = SplitBlock(Pred, Pred->getTerminator(), DT, LI, nullptr,
2055                                  "vector.memcheck");
2056 
2057       MemRuntimeCheckCond =
2058           addRuntimeChecks(MemCheckBlock->getTerminator(), L,
2059                            RtPtrChecking.getChecks(), MemCheckExp);
2060       assert(MemRuntimeCheckCond &&
2061              "no RT checks generated although RtPtrChecking "
2062              "claimed checks are required");
2063     }
2064 
2065     if (!MemCheckBlock && !SCEVCheckBlock)
2066       return;
2067 
2068     // Unhook the temporary block with the checks, update various places
2069     // accordingly.
2070     if (SCEVCheckBlock)
2071       SCEVCheckBlock->replaceAllUsesWith(Preheader);
2072     if (MemCheckBlock)
2073       MemCheckBlock->replaceAllUsesWith(Preheader);
2074 
2075     if (SCEVCheckBlock) {
2076       SCEVCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2077       new UnreachableInst(Preheader->getContext(), SCEVCheckBlock);
2078       Preheader->getTerminator()->eraseFromParent();
2079     }
2080     if (MemCheckBlock) {
2081       MemCheckBlock->getTerminator()->moveBefore(Preheader->getTerminator());
2082       new UnreachableInst(Preheader->getContext(), MemCheckBlock);
2083       Preheader->getTerminator()->eraseFromParent();
2084     }
2085 
2086     DT->changeImmediateDominator(LoopHeader, Preheader);
2087     if (MemCheckBlock) {
2088       DT->eraseNode(MemCheckBlock);
2089       LI->removeBlock(MemCheckBlock);
2090     }
2091     if (SCEVCheckBlock) {
2092       DT->eraseNode(SCEVCheckBlock);
2093       LI->removeBlock(SCEVCheckBlock);
2094     }
2095   }
2096 
2097   /// Remove the created SCEV & memory runtime check blocks & instructions, if
2098   /// unused.
2099   ~GeneratedRTChecks() {
2100     SCEVExpanderCleaner SCEVCleaner(SCEVExp, *DT);
2101     SCEVExpanderCleaner MemCheckCleaner(MemCheckExp, *DT);
2102     if (!SCEVCheckCond)
2103       SCEVCleaner.markResultUsed();
2104 
2105     if (!MemRuntimeCheckCond)
2106       MemCheckCleaner.markResultUsed();
2107 
2108     if (MemRuntimeCheckCond) {
2109       auto &SE = *MemCheckExp.getSE();
2110       // Memory runtime check generation creates compares that use expanded
2111       // values. Remove them before running the SCEVExpanderCleaners.
2112       for (auto &I : make_early_inc_range(reverse(*MemCheckBlock))) {
2113         if (MemCheckExp.isInsertedInstruction(&I))
2114           continue;
2115         SE.forgetValue(&I);
2116         I.eraseFromParent();
2117       }
2118     }
2119     MemCheckCleaner.cleanup();
2120     SCEVCleaner.cleanup();
2121 
2122     if (SCEVCheckCond)
2123       SCEVCheckBlock->eraseFromParent();
2124     if (MemRuntimeCheckCond)
2125       MemCheckBlock->eraseFromParent();
2126   }
2127 
2128   /// Adds the generated SCEVCheckBlock before \p LoopVectorPreHeader and
2129   /// adjusts the branches to branch to the vector preheader or \p Bypass,
2130   /// depending on the generated condition.
2131   BasicBlock *emitSCEVChecks(Loop *L, BasicBlock *Bypass,
2132                              BasicBlock *LoopVectorPreHeader,
2133                              BasicBlock *LoopExitBlock) {
2134     if (!SCEVCheckCond)
2135       return nullptr;
2136     if (auto *C = dyn_cast<ConstantInt>(SCEVCheckCond))
2137       if (C->isZero())
2138         return nullptr;
2139 
2140     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2141 
2142     BranchInst::Create(LoopVectorPreHeader, SCEVCheckBlock);
2143     // Create new preheader for vector loop.
2144     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2145       PL->addBasicBlockToLoop(SCEVCheckBlock, *LI);
2146 
2147     SCEVCheckBlock->getTerminator()->eraseFromParent();
2148     SCEVCheckBlock->moveBefore(LoopVectorPreHeader);
2149     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2150                                                 SCEVCheckBlock);
2151 
2152     DT->addNewBlock(SCEVCheckBlock, Pred);
2153     DT->changeImmediateDominator(LoopVectorPreHeader, SCEVCheckBlock);
2154 
2155     ReplaceInstWithInst(
2156         SCEVCheckBlock->getTerminator(),
2157         BranchInst::Create(Bypass, LoopVectorPreHeader, SCEVCheckCond));
2158     // Mark the check as used, to prevent it from being removed during cleanup.
2159     SCEVCheckCond = nullptr;
2160     return SCEVCheckBlock;
2161   }
2162 
2163   /// Adds the generated MemCheckBlock before \p LoopVectorPreHeader and adjusts
2164   /// the branches to branch to the vector preheader or \p Bypass, depending on
2165   /// the generated condition.
2166   BasicBlock *emitMemRuntimeChecks(Loop *L, BasicBlock *Bypass,
2167                                    BasicBlock *LoopVectorPreHeader) {
2168     // Check if we generated code that checks in runtime if arrays overlap.
2169     if (!MemRuntimeCheckCond)
2170       return nullptr;
2171 
2172     auto *Pred = LoopVectorPreHeader->getSinglePredecessor();
2173     Pred->getTerminator()->replaceSuccessorWith(LoopVectorPreHeader,
2174                                                 MemCheckBlock);
2175 
2176     DT->addNewBlock(MemCheckBlock, Pred);
2177     DT->changeImmediateDominator(LoopVectorPreHeader, MemCheckBlock);
2178     MemCheckBlock->moveBefore(LoopVectorPreHeader);
2179 
2180     if (auto *PL = LI->getLoopFor(LoopVectorPreHeader))
2181       PL->addBasicBlockToLoop(MemCheckBlock, *LI);
2182 
2183     ReplaceInstWithInst(
2184         MemCheckBlock->getTerminator(),
2185         BranchInst::Create(Bypass, LoopVectorPreHeader, MemRuntimeCheckCond));
2186     MemCheckBlock->getTerminator()->setDebugLoc(
2187         Pred->getTerminator()->getDebugLoc());
2188 
2189     // Mark the check as used, to prevent it from being removed during cleanup.
2190     MemRuntimeCheckCond = nullptr;
2191     return MemCheckBlock;
2192   }
2193 };
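// A sketch of what the generated checks typically guard (illustrative): for
//   for (i = 0; i < n; ++i) a[i] = b[i] + 1;
// with possibly-aliasing a and b, the memory check block compares the
// accessed address ranges, roughly
//   overlap = (a.start < b.end) && (b.start < a.end)
// and bypasses the vector loop when they overlap, while the SCEV check block
// guards predicates (e.g. no-overflow assumptions) made by SCEV.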
2194 
2195 // Return true if \p OuterLp is an outer loop annotated with hints for explicit
2196 // vectorization. The loop needs to be annotated with #pragma omp simd
2197 // simdlen(#) or #pragma clang loop vectorize(enable) vectorize_width(#). If the
2198 // vector length information is not provided, vectorization is not considered
2199 // explicit. Interleave hints are not allowed either. These limitations will be
2200 // relaxed in the future.
2201 // Please note that we are currently forced to abuse the pragma 'clang
2202 // vectorize' semantics. This pragma provides *auto-vectorization hints*
2203 // (i.e., LV must check that vectorization is legal) whereas pragma 'omp simd'
2204 // provides *explicit vectorization hints* (LV can bypass legal checks and
2205 // assume that vectorization is legal). However, both hints are implemented
2206 // using the same metadata (llvm.loop.vectorize, processed by
2207 // LoopVectorizeHints). This will be fixed in the future when the native IR
2208 // representation for pragma 'omp simd' is introduced.
2209 static bool isExplicitVecOuterLoop(Loop *OuterLp,
2210                                    OptimizationRemarkEmitter *ORE) {
2211   assert(!OuterLp->isInnermost() && "This is not an outer loop");
2212   LoopVectorizeHints Hints(OuterLp, true /*DisableInterleaving*/, *ORE);
2213 
2214   // Only outer loops with an explicit vectorization hint are supported.
2215   // Unannotated outer loops are ignored.
2216   if (Hints.getForce() == LoopVectorizeHints::FK_Undefined)
2217     return false;
2218 
2219   Function *Fn = OuterLp->getHeader()->getParent();
2220   if (!Hints.allowVectorization(Fn, OuterLp,
2221                                 true /*VectorizeOnlyWhenForced*/)) {
2222     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent outer loop vectorization.\n");
2223     return false;
2224   }
2225 
2226   if (Hints.getInterleave() > 1) {
2227     // TODO: Interleave support is future work.
2228     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Interleave is not supported for "
2229                          "outer loops.\n");
2230     Hints.emitRemarkWithHints();
2231     return false;
2232   }
2233 
2234   return true;
2235 }
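// For illustration (assumed source), an outer loop annotated as
//   #pragma clang loop vectorize(enable) vectorize_width(4)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < m; ++j)
//       a[i][j] += b[i][j];
// passes this check, whereas an unannotated nest, or one that additionally
// requests interleaving, is rejected.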
2236 
2237 static void collectSupportedLoops(Loop &L, LoopInfo *LI,
2238                                   OptimizationRemarkEmitter *ORE,
2239                                   SmallVectorImpl<Loop *> &V) {
2240   // Collect inner loops and outer loops without irreducible control flow. For
2241   // now, only collect outer loops that have explicit vectorization hints. If we
2242   // are stress testing the VPlan H-CFG construction, we collect the outermost
2243   // loop of every loop nest.
2244   if (L.isInnermost() || VPlanBuildStressTest ||
2245       (EnableVPlanNativePath && isExplicitVecOuterLoop(&L, ORE))) {
2246     LoopBlocksRPO RPOT(&L);
2247     RPOT.perform(LI);
2248     if (!containsIrreducibleCFG<const BasicBlock *>(RPOT, *LI)) {
2249       V.push_back(&L);
2250       // TODO: Collect inner loops inside marked outer loops in case
2251       // vectorization fails for the outer loop. Do not invoke
2252       // 'containsIrreducibleCFG' again for inner loops when the outer loop is
2253       // already known to be reducible. We can use an inherited attribute for
2254       // that.
2255       return;
2256     }
2257   }
2258   for (Loop *InnerL : L)
2259     collectSupportedLoops(*InnerL, LI, ORE, V);
2260 }
2261 
2262 namespace {
2263 
2264 /// The LoopVectorize Pass.
2265 struct LoopVectorize : public FunctionPass {
2266   /// Pass identification, replacement for typeid
2267   static char ID;
2268 
2269   LoopVectorizePass Impl;
2270 
2271   explicit LoopVectorize(bool InterleaveOnlyWhenForced = false,
2272                          bool VectorizeOnlyWhenForced = false)
2273       : FunctionPass(ID),
2274         Impl({InterleaveOnlyWhenForced, VectorizeOnlyWhenForced}) {
2275     initializeLoopVectorizePass(*PassRegistry::getPassRegistry());
2276   }
2277 
2278   bool runOnFunction(Function &F) override {
2279     if (skipFunction(F))
2280       return false;
2281 
2282     auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2283     auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2284     auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2285     auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2286     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
2287     auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2288     auto *TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2289     auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2290     auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2291     auto *LAA = &getAnalysis<LoopAccessLegacyAnalysis>();
2292     auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
2293     auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2294     auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2295 
2296     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
2297         [&](Loop &L) -> const LoopAccessInfo & { return LAA->getInfo(&L); };
2298 
2299     return Impl.runImpl(F, *SE, *LI, *TTI, *DT, *BFI, TLI, *DB, *AA, *AC,
2300                         GetLAA, *ORE, PSI).MadeAnyChange;
2301   }
2302 
2303   void getAnalysisUsage(AnalysisUsage &AU) const override {
2304     AU.addRequired<AssumptionCacheTracker>();
2305     AU.addRequired<BlockFrequencyInfoWrapperPass>();
2306     AU.addRequired<DominatorTreeWrapperPass>();
2307     AU.addRequired<LoopInfoWrapperPass>();
2308     AU.addRequired<ScalarEvolutionWrapperPass>();
2309     AU.addRequired<TargetTransformInfoWrapperPass>();
2310     AU.addRequired<AAResultsWrapperPass>();
2311     AU.addRequired<LoopAccessLegacyAnalysis>();
2312     AU.addRequired<DemandedBitsWrapperPass>();
2313     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2314     AU.addRequired<InjectTLIMappingsLegacy>();
2315 
2316     // We currently do not preserve loopinfo/dominator analyses with outer loop
2317     // vectorization. Until this is addressed, mark these analyses as preserved
2318     // only for non-VPlan-native path.
2319     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
2320     if (!EnableVPlanNativePath) {
2321       AU.addPreserved<LoopInfoWrapperPass>();
2322       AU.addPreserved<DominatorTreeWrapperPass>();
2323     }
2324 
2325     AU.addPreserved<BasicAAWrapperPass>();
2326     AU.addPreserved<GlobalsAAWrapperPass>();
2327     AU.addRequired<ProfileSummaryInfoWrapperPass>();
2328   }
2329 };
2330 
2331 } // end anonymous namespace
2332 
2333 //===----------------------------------------------------------------------===//
2334 // Implementation of LoopVectorizationLegality, InnerLoopVectorizer and
2335 // LoopVectorizationCostModel and LoopVectorizationPlanner.
2336 //===----------------------------------------------------------------------===//
2337 
2338 Value *InnerLoopVectorizer::getBroadcastInstrs(Value *V) {
2339   // We need to place the broadcast of invariant variables outside the loop,
2340   // but only if it's proven safe to do so. Else, broadcast will be inside
2341   // vector loop body.
2342   Instruction *Instr = dyn_cast<Instruction>(V);
2343   bool SafeToHoist = OrigLoop->isLoopInvariant(V) &&
2344                      (!Instr ||
2345                       DT->dominates(Instr->getParent(), LoopVectorPreHeader));
2346   // Place the code for broadcasting invariant variables in the new preheader.
2347   IRBuilder<>::InsertPointGuard Guard(Builder);
2348   if (SafeToHoist)
2349     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2350 
2351   // Broadcast the scalar into all locations in the vector.
2352   Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
2353 
2354   return Shuf;
2355 }
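// For example (illustrative IR), broadcasting an i32 %v at VF = 4 emits roughly
// the usual splat pattern, hoisted into the preheader when that is safe:
//   %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %v, i64 0
//   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
//                                    <4 x i32> poison, <4 x i32> zeroinitializer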
2356 
2357 void InnerLoopVectorizer::createVectorIntOrFpInductionPHI(
2358     const InductionDescriptor &II, Value *Step, Value *Start,
2359     Instruction *EntryVal, VPValue *Def, VPValue *CastDef,
2360     VPTransformState &State) {
2361   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2362          "Expected either an induction phi-node or a truncate of it!");
2363 
2364   // Construct the initial value of the vector IV in the vector loop preheader
2365   auto CurrIP = Builder.saveIP();
2366   Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
2367   if (isa<TruncInst>(EntryVal)) {
2368     assert(Start->getType()->isIntegerTy() &&
2369            "Truncation requires an integer type");
2370     auto *TruncType = cast<IntegerType>(EntryVal->getType());
2371     Step = Builder.CreateTrunc(Step, TruncType);
2372     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
2373   }
2374 
2375   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
2376   Value *SplatStart = Builder.CreateVectorSplat(VF, Start);
2377   Value *SteppedStart =
2378       getStepVector(SplatStart, Zero, Step, II.getInductionOpcode());
2379 
2380   // We create vector phi nodes for both integer and floating-point induction
2381   // variables. Here, we determine the kind of arithmetic we will perform.
2382   Instruction::BinaryOps AddOp;
2383   Instruction::BinaryOps MulOp;
2384   if (Step->getType()->isIntegerTy()) {
2385     AddOp = Instruction::Add;
2386     MulOp = Instruction::Mul;
2387   } else {
2388     AddOp = II.getInductionOpcode();
2389     MulOp = Instruction::FMul;
2390   }
2391 
2392   // Multiply the vectorization factor by the step using integer or
2393   // floating-point arithmetic as appropriate.
2394   Type *StepType = Step->getType();
2395   Value *RuntimeVF;
2396   if (Step->getType()->isFloatingPointTy())
2397     RuntimeVF = getRuntimeVFAsFloat(Builder, StepType, VF);
2398   else
2399     RuntimeVF = getRuntimeVF(Builder, StepType, VF);
2400   Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
2401 
2402   // Create a vector splat to use in the induction update.
2403   //
2404   // FIXME: If the step is non-constant, we create the vector splat with
2405   //        IRBuilder. IRBuilder can constant-fold the multiply, but it doesn't
2406   //        handle a constant vector splat.
2407   Value *SplatVF = isa<Constant>(Mul)
2408                        ? ConstantVector::getSplat(VF, cast<Constant>(Mul))
2409                        : Builder.CreateVectorSplat(VF, Mul);
2410   Builder.restoreIP(CurrIP);
2411 
2412   // We may need to add the step a number of times, depending on the unroll
2413   // factor. The last of those goes into the PHI.
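  // As a sketch, for an i32 IV starting at zero with unit step, VF = 4 and
  // UF = 2, the vector loop would contain roughly:
  //   %vec.ind      = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ],
  //                                 [ %vec.ind.next, %vector.body ]
  //   %step.add     = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  //   %vec.ind.next = add <4 x i32> %step.add, <i32 4, i32 4, i32 4, i32 4>
  // where %vec.ind and %step.add are the per-part values and %vec.ind.next
  // feeds the phi on the backedge (names and constants are illustrative).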
2414   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind",
2415                                     &*LoopVectorBody->getFirstInsertionPt());
2416   VecInd->setDebugLoc(EntryVal->getDebugLoc());
2417   Instruction *LastInduction = VecInd;
2418   for (unsigned Part = 0; Part < UF; ++Part) {
2419     State.set(Def, LastInduction, Part);
2420 
2421     if (isa<TruncInst>(EntryVal))
2422       addMetadata(LastInduction, EntryVal);
2423     recordVectorLoopValueForInductionCast(II, EntryVal, LastInduction, CastDef,
2424                                           State, Part);
2425 
2426     LastInduction = cast<Instruction>(
2427         Builder.CreateBinOp(AddOp, LastInduction, SplatVF, "step.add"));
2428     LastInduction->setDebugLoc(EntryVal->getDebugLoc());
2429   }
2430 
2431   // Move the last step to the end of the latch block. This ensures consistent
2432   // placement of all induction updates.
2433   auto *LoopVectorLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
2434   auto *Br = cast<BranchInst>(LoopVectorLatch->getTerminator());
2435   auto *ICmp = cast<Instruction>(Br->getCondition());
2436   LastInduction->moveBefore(ICmp);
2437   LastInduction->setName("vec.ind.next");
2438 
2439   VecInd->addIncoming(SteppedStart, LoopVectorPreHeader);
2440   VecInd->addIncoming(LastInduction, LoopVectorLatch);
2441 }
2442 
2443 bool InnerLoopVectorizer::shouldScalarizeInstruction(Instruction *I) const {
2444   return Cost->isScalarAfterVectorization(I, VF) ||
2445          Cost->isProfitableToScalarize(I, VF);
2446 }
2447 
2448 bool InnerLoopVectorizer::needsScalarInduction(Instruction *IV) const {
2449   if (shouldScalarizeInstruction(IV))
2450     return true;
2451   auto isScalarInst = [&](User *U) -> bool {
2452     auto *I = cast<Instruction>(U);
2453     return (OrigLoop->contains(I) && shouldScalarizeInstruction(I));
2454   };
2455   return llvm::any_of(IV->users(), isScalarInst);
2456 }
2457 
2458 void InnerLoopVectorizer::recordVectorLoopValueForInductionCast(
2459     const InductionDescriptor &ID, const Instruction *EntryVal,
2460     Value *VectorLoopVal, VPValue *CastDef, VPTransformState &State,
2461     unsigned Part, unsigned Lane) {
2462   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
2463          "Expected either an induction phi-node or a truncate of it!");
2464 
2465   // This induction variable is not the phi from the original loop but the
2466   // newly-created IV, based on the proof that the casted phi is equal to the
2467   // uncasted phi in the vectorized loop (possibly under a runtime guard). It
2468   // reuses the same InductionDescriptor that the original IV uses, but we
2469   // don't have to do any recording in this case - that is done when the
2470   // original IV is processed.
2471   if (isa<TruncInst>(EntryVal))
2472     return;
2473 
2474   if (!CastDef) {
2475     assert(ID.getCastInsts().empty() &&
2476            "there are casts for ID, but no CastDef");
2477     return;
2478   }
2479   assert(!ID.getCastInsts().empty() &&
2480          "there is a CastDef, but no casts for ID");
2481   // Only the first Cast instruction in the Casts vector is of interest.
2482   // The rest of the Casts (if they exist) have no uses outside the
2483   // induction update chain itself.
2484   if (Lane < UINT_MAX)
2485     State.set(CastDef, VectorLoopVal, VPIteration(Part, Lane));
2486   else
2487     State.set(CastDef, VectorLoopVal, Part);
2488 }
2489 
2490 void InnerLoopVectorizer::widenIntOrFpInduction(PHINode *IV, Value *Start,
2491                                                 TruncInst *Trunc, VPValue *Def,
2492                                                 VPValue *CastDef,
2493                                                 VPTransformState &State) {
2494   assert((IV->getType()->isIntegerTy() || IV != OldInduction) &&
2495          "Primary induction variable must have an integer type");
2496 
2497   auto II = Legal->getInductionVars().find(IV);
2498   assert(II != Legal->getInductionVars().end() && "IV is not an induction");
2499 
2500   auto ID = II->second;
2501   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
2502 
2503   // The value from the original loop to which we are mapping the new induction
2504   // variable.
2505   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
2506 
2507   auto &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
2508 
2509   // Generate code for the induction step. Note that induction steps are
2510   // required to be loop-invariant
2511   auto CreateStepValue = [&](const SCEV *Step) -> Value * {
2512     assert(PSE.getSE()->isLoopInvariant(Step, OrigLoop) &&
2513            "Induction step should be loop invariant");
2514     if (PSE.getSE()->isSCEVable(IV->getType())) {
2515       SCEVExpander Exp(*PSE.getSE(), DL, "induction");
2516       return Exp.expandCodeFor(Step, Step->getType(),
2517                                LoopVectorPreHeader->getTerminator());
2518     }
2519     return cast<SCEVUnknown>(Step)->getValue();
2520   };
2521 
2522   // The scalar value to broadcast. This is derived from the canonical
2523   // induction variable. If a truncation type is given, truncate the canonical
2524   // induction variable and step. Otherwise, derive these values from the
2525   // induction descriptor.
2526   auto CreateScalarIV = [&](Value *&Step) -> Value * {
2527     Value *ScalarIV = Induction;
2528     if (IV != OldInduction) {
2529       ScalarIV = IV->getType()->isIntegerTy()
2530                      ? Builder.CreateSExtOrTrunc(Induction, IV->getType())
2531                      : Builder.CreateCast(Instruction::SIToFP, Induction,
2532                                           IV->getType());
2533       ScalarIV = emitTransformedIndex(Builder, ScalarIV, PSE.getSE(), DL, ID);
2534       ScalarIV->setName("offset.idx");
2535     }
2536     if (Trunc) {
2537       auto *TruncType = cast<IntegerType>(Trunc->getType());
2538       assert(Step->getType()->isIntegerTy() &&
2539              "Truncation requires an integer step");
2540       ScalarIV = Builder.CreateTrunc(ScalarIV, TruncType);
2541       Step = Builder.CreateTrunc(Step, TruncType);
2542     }
2543     return ScalarIV;
2544   };
2545 
2546   // Create the vector values from the scalar IV, in the absence of creating a
2547   // vector IV.
2548   auto CreateSplatIV = [&](Value *ScalarIV, Value *Step) {
2549     Value *Broadcasted = getBroadcastInstrs(ScalarIV);
2550     for (unsigned Part = 0; Part < UF; ++Part) {
2551       assert(!VF.isScalable() && "scalable vectors not yet supported.");
2552       Value *StartIdx;
2553       if (Step->getType()->isFloatingPointTy())
2554         StartIdx = getRuntimeVFAsFloat(Builder, Step->getType(), VF * Part);
2555       else
2556         StartIdx = getRuntimeVF(Builder, Step->getType(), VF * Part);
2557 
2558       Value *EntryPart =
2559           getStepVector(Broadcasted, StartIdx, Step, ID.getInductionOpcode());
2560       State.set(Def, EntryPart, Part);
2561       if (Trunc)
2562         addMetadata(EntryPart, Trunc);
2563       recordVectorLoopValueForInductionCast(ID, EntryVal, EntryPart, CastDef,
2564                                             State, Part);
2565     }
2566   };
2567 
2568   // Fast-math-flags propagate from the original induction instruction.
2569   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2570   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
2571     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
2572 
2573   // Now do the actual transformations, and start with creating the step value.
2574   Value *Step = CreateStepValue(ID.getStep());
2575   if (VF.isZero() || VF.isScalar()) {
2576     Value *ScalarIV = CreateScalarIV(Step);
2577     CreateSplatIV(ScalarIV, Step);
2578     return;
2579   }
2580 
2581   // Determine if we want a scalar version of the induction variable. This is
2582   // true if the induction variable itself is not widened, or if it has at
2583   // least one user in the loop that is not widened.
2584   auto NeedsScalarIV = needsScalarInduction(EntryVal);
2585   if (!NeedsScalarIV) {
2586     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2587                                     State);
2588     return;
2589   }
2590 
2591   // Try to create a new independent vector induction variable. If we can't
2592   // create the phi node, we will splat the scalar induction variable in each
2593   // loop iteration.
2594   if (!shouldScalarizeInstruction(EntryVal)) {
2595     createVectorIntOrFpInductionPHI(ID, Step, Start, EntryVal, Def, CastDef,
2596                                     State);
2597     Value *ScalarIV = CreateScalarIV(Step);
2598     // Create scalar steps that can be used by instructions we will later
2599     // scalarize. Note that the addition of the scalar steps will not increase
2600     // the number of instructions in the loop in the common case prior to
2601     // InstCombine. We will be trading one vector extract for each scalar step.
2602     buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2603     return;
2604   }
2605 
2606   // All IV users are scalar instructions, so only emit a scalar IV, not a
2607   // vectorised IV. Except when we tail-fold, then the splat IV feeds the
2608   // predicate used by the masked loads/stores.
2609   Value *ScalarIV = CreateScalarIV(Step);
2610   if (!Cost->isScalarEpilogueAllowed())
2611     CreateSplatIV(ScalarIV, Step);
2612   buildScalarSteps(ScalarIV, Step, EntryVal, ID, Def, CastDef, State);
2613 }
2614 
2615 Value *InnerLoopVectorizer::getStepVector(Value *Val, Value *StartIdx,
2616                                           Value *Step,
2617                                           Instruction::BinaryOps BinOp) {
2618   // Create and check the types.
2619   auto *ValVTy = cast<VectorType>(Val->getType());
2620   ElementCount VLen = ValVTy->getElementCount();
2621 
2622   Type *STy = Val->getType()->getScalarType();
2623   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
2624          "Induction Step must be an integer or FP");
2625   assert(Step->getType() == STy && "Step has wrong type");
2626 
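  // Conceptually, the result is Val + (StartIdx + <0, 1, ..., VF-1>) * Step;
  // e.g. for VF = 4, StartIdx = 0 and Step = 2 this adds <0, 2, 4, 6> to Val
  // (an illustrative sketch; the FP case uses the FP opcodes below).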
2627   SmallVector<Constant *, 8> Indices;
2628 
2629   // Create a vector of consecutive numbers from zero to VF.
2630   VectorType *InitVecValVTy = ValVTy;
2631   Type *InitVecValSTy = STy;
2632   if (STy->isFloatingPointTy()) {
2633     InitVecValSTy =
2634         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
2635     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
2636   }
2637   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
2638 
2639   // Splat the StartIdx
2640   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
2641 
2642   if (STy->isIntegerTy()) {
2643     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
2644     Step = Builder.CreateVectorSplat(VLen, Step);
2645     assert(Step->getType() == Val->getType() && "Invalid step vec");
2646     // FIXME: The newly created binary instructions should contain nsw/nuw flags,
2647     // which can be found from the original scalar operations.
2648     Step = Builder.CreateMul(InitVec, Step);
2649     return Builder.CreateAdd(Val, Step, "induction");
2650   }
2651 
2652   // Floating point induction.
2653   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
2654          "Binary Opcode should be specified for FP induction");
2655   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
2656   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
2657 
2658   Step = Builder.CreateVectorSplat(VLen, Step);
2659   Value *MulOp = Builder.CreateFMul(InitVec, Step);
2660   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
2661 }
2662 
2663 void InnerLoopVectorizer::buildScalarSteps(Value *ScalarIV, Value *Step,
2664                                            Instruction *EntryVal,
2665                                            const InductionDescriptor &ID,
2666                                            VPValue *Def, VPValue *CastDef,
2667                                            VPTransformState &State) {
2668   // We shouldn't have to build scalar steps if we aren't vectorizing.
2669   assert(VF.isVector() && "VF should be greater than one");
2670   // Get the value type and ensure it and the step have the same type.
2671   Type *ScalarIVTy = ScalarIV->getType()->getScalarType();
2672   assert(ScalarIVTy == Step->getType() &&
2673          "Val and Step should have the same type");
2674 
2675   // We build scalar steps for both integer and floating-point induction
2676   // variables. Here, we determine the kind of arithmetic we will perform.
2677   Instruction::BinaryOps AddOp;
2678   Instruction::BinaryOps MulOp;
2679   if (ScalarIVTy->isIntegerTy()) {
2680     AddOp = Instruction::Add;
2681     MulOp = Instruction::Mul;
2682   } else {
2683     AddOp = ID.getInductionOpcode();
2684     MulOp = Instruction::FMul;
2685   }
2686 
2687   // Determine the number of scalars we need to generate for each unroll
2688   // iteration. If EntryVal is uniform, we only need to generate the first
2689   // lane. Otherwise, we generate all VF values.
2690   bool IsUniform =
2691       Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), VF);
2692   unsigned Lanes = IsUniform ? 1 : VF.getKnownMinValue();
2693   // Compute the scalar steps and save the results in State.
2694   Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
2695                                      ScalarIVTy->getScalarSizeInBits());
2696   Type *VecIVTy = nullptr;
2697   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
2698   if (!IsUniform && VF.isScalable()) {
2699     VecIVTy = VectorType::get(ScalarIVTy, VF);
2700     UnitStepVec = Builder.CreateStepVector(VectorType::get(IntStepTy, VF));
2701     SplatStep = Builder.CreateVectorSplat(VF, Step);
2702     SplatIV = Builder.CreateVectorSplat(VF, ScalarIV);
2703   }
2704 
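  // Each generated scalar step is, conceptually, ScalarIV + (Part * VF + Lane)
  // * Step; e.g. with VF = 4, UF = 2 and Step = 1 this produces the eight
  // values ScalarIV + 0 .. ScalarIV + 7 (a sketch; the FP case uses AddOp and
  // MulOp from the induction descriptor).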
2705   for (unsigned Part = 0; Part < UF; ++Part) {
2706     Value *StartIdx0 = createStepForVF(Builder, IntStepTy, VF, Part);
2707 
2708     if (!IsUniform && VF.isScalable()) {
2709       auto *SplatStartIdx = Builder.CreateVectorSplat(VF, StartIdx0);
2710       auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
2711       if (ScalarIVTy->isFloatingPointTy())
2712         InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
2713       auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
2714       auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
2715       State.set(Def, Add, Part);
2716       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2717                                             Part);
2718       // It's useful to record the lane values too for the known minimum number
2719       // of elements so we do those below. This improves the code quality when
2720       // trying to extract the first element, for example.
2721     }
2722 
2723     if (ScalarIVTy->isFloatingPointTy())
2724       StartIdx0 = Builder.CreateSIToFP(StartIdx0, ScalarIVTy);
2725 
2726     for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
2727       Value *StartIdx = Builder.CreateBinOp(
2728           AddOp, StartIdx0, getSignedIntOrFpConstant(ScalarIVTy, Lane));
2729       // The step returned by `createStepForVF` is a runtime-evaluated value
2730       // when VF is scalable. Otherwise, it should be folded into a Constant.
2731       assert((VF.isScalable() || isa<Constant>(StartIdx)) &&
2732              "Expected StartIdx to be folded to a constant when VF is not "
2733              "scalable");
2734       auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
2735       auto *Add = Builder.CreateBinOp(AddOp, ScalarIV, Mul);
2736       State.set(Def, Add, VPIteration(Part, Lane));
2737       recordVectorLoopValueForInductionCast(ID, EntryVal, Add, CastDef, State,
2738                                             Part, Lane);
2739     }
2740   }
2741 }
2742 
2743 void InnerLoopVectorizer::packScalarIntoVectorValue(VPValue *Def,
2744                                                     const VPIteration &Instance,
2745                                                     VPTransformState &State) {
2746   Value *ScalarInst = State.get(Def, Instance);
2747   Value *VectorValue = State.get(Def, Instance.Part);
2748   VectorValue = Builder.CreateInsertElement(
2749       VectorValue, ScalarInst,
2750       Instance.Lane.getAsRuntimeExpr(State.Builder, VF));
2751   State.set(Def, VectorValue, Instance.Part);
2752 }
2753 
2754 Value *InnerLoopVectorizer::reverseVector(Value *Vec) {
2755   assert(Vec->getType()->isVectorTy() && "Invalid type");
2756   return Builder.CreateVectorReverse(Vec, "reverse");
2757 }
2758 
2759 // Return whether we allow using masked interleave-groups (for dealing with
2760 // strided loads/stores that reside in predicated blocks, or for dealing
2761 // with gaps).
2762 static bool useMaskedInterleavedAccesses(const TargetTransformInfo &TTI) {
2763   // If an override option has been passed in for interleaved accesses, use it.
2764   if (EnableMaskedInterleavedMemAccesses.getNumOccurrences() > 0)
2765     return EnableMaskedInterleavedMemAccesses;
2766 
2767   return TTI.enableMaskedInterleavedAccessVectorization();
2768 }
2769 
2770 // Try to vectorize the interleave group that \p Instr belongs to.
2771 //
2772 // E.g. Translate following interleaved load group (factor = 3):
2773 //   for (i = 0; i < N; i+=3) {
2774 //     R = Pic[i];             // Member of index 0
2775 //     G = Pic[i+1];           // Member of index 1
2776 //     B = Pic[i+2];           // Member of index 2
2777 //     ... // do something to R, G, B
2778 //   }
2779 // To:
2780 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2781 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2782 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2783 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2784 //
2785 // Or translate following interleaved store group (factor = 3):
2786 //   for (i = 0; i < N; i+=3) {
2787 //     ... do something to R, G, B
2788 //     Pic[i]   = R;           // Member of index 0
2789 //     Pic[i+1] = G;           // Member of index 1
2790 //     Pic[i+2] = B;           // Member of index 2
2791 //   }
2792 // To:
2793 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2794 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2795 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2796 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2797 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2798 void InnerLoopVectorizer::vectorizeInterleaveGroup(
2799     const InterleaveGroup<Instruction> *Group, ArrayRef<VPValue *> VPDefs,
2800     VPTransformState &State, VPValue *Addr, ArrayRef<VPValue *> StoredValues,
2801     VPValue *BlockInMask) {
2802   Instruction *Instr = Group->getInsertPos();
2803   const DataLayout &DL = Instr->getModule()->getDataLayout();
2804 
2805   // Prepare for the vector type of the interleaved load/store.
2806   Type *ScalarTy = getLoadStoreType(Instr);
2807   unsigned InterleaveFactor = Group->getFactor();
2808   assert(!VF.isScalable() && "scalable vectors not yet supported.");
2809   auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
2810 
2811   // Prepare for the new pointers.
2812   SmallVector<Value *, 2> AddrParts;
2813   unsigned Index = Group->getIndex(Instr);
2814 
2815   // TODO: extend the masked interleaved-group support to reversed access.
2816   assert((!BlockInMask || !Group->isReverse()) &&
2817          "Reversed masked interleave-group not supported.");
2818 
2819   // If the group is reverse, adjust the index to refer to the last vector lane
2820   // instead of the first. We adjust the index from the first vector lane,
2821   // rather than directly getting the pointer for lane VF - 1, because the
2822   // pointer operand of the interleaved access is supposed to be uniform. For
2823   // uniform instructions, we're only required to generate a value for the
2824   // first vector lane in each unroll iteration.
2825   if (Group->isReverse())
2826     Index += (VF.getKnownMinValue() - 1) * Group->getFactor();
2827 
2828   for (unsigned Part = 0; Part < UF; Part++) {
2829     Value *AddrPart = State.get(Addr, VPIteration(Part, 0));
2830     setDebugLocFromInst(AddrPart);
2831     // Notice that the current instruction could be at any index in the group.
2832     // We need to adjust the address to the member of index 0.
2833     //
2834     // E.g.  a = A[i+1];     // Member of index 1 (Current instruction)
2835     //       b = A[i];       // Member of index 0
2836     // The current pointer points to A[i+1]; adjust it to A[i].
2837     //
2838     // E.g.  A[i+1] = a;     // Member of index 1
2839     //       A[i]   = b;     // Member of index 0
2840     //       A[i+2] = c;     // Member of index 2 (Current instruction)
2841     // The current pointer points to A[i+2]; adjust it to A[i].
2842     // Current pointer is pointed to A[i+2], adjust it to A[i].
2843 
2844     bool InBounds = false;
2845     if (auto *gep = dyn_cast<GetElementPtrInst>(AddrPart->stripPointerCasts()))
2846       InBounds = gep->isInBounds();
2847     AddrPart = Builder.CreateGEP(ScalarTy, AddrPart, Builder.getInt32(-Index));
2848     cast<GetElementPtrInst>(AddrPart)->setIsInBounds(InBounds);
2849 
2850     // Cast to the vector pointer type.
2851     unsigned AddressSpace = AddrPart->getType()->getPointerAddressSpace();
2852     Type *PtrTy = VecTy->getPointerTo(AddressSpace);
2853     AddrParts.push_back(Builder.CreateBitCast(AddrPart, PtrTy));
2854   }
2855 
2856   setDebugLocFromInst(Instr);
2857   Value *PoisonVec = PoisonValue::get(VecTy);
2858 
2859   Value *MaskForGaps = nullptr;
2860   if (Group->requiresScalarEpilogue() && !Cost->isScalarEpilogueAllowed()) {
2861     MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2862     assert(MaskForGaps && "Mask for Gaps is required but it is null");
2863   }
2864 
2865   // Vectorize the interleaved load group.
2866   if (isa<LoadInst>(Instr)) {
2867     // For each unroll part, create a wide load for the group.
2868     SmallVector<Value *, 2> NewLoads;
2869     for (unsigned Part = 0; Part < UF; Part++) {
2870       Instruction *NewLoad;
2871       if (BlockInMask || MaskForGaps) {
2872         assert(useMaskedInterleavedAccesses(*TTI) &&
2873                "masked interleaved groups are not allowed.");
2874         Value *GroupMask = MaskForGaps;
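        // The per-lane block mask is replicated across the members of the
        // group; e.g. for factor 3 and VF 4, a mask <m0,m1,m2,m3> becomes
        // <m0,m0,m0, m1,m1,m1, m2,m2,m2, m3,m3,m3> (illustrative sketch).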
2875         if (BlockInMask) {
2876           Value *BlockInMaskPart = State.get(BlockInMask, Part);
2877           Value *ShuffledMask = Builder.CreateShuffleVector(
2878               BlockInMaskPart,
2879               createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2880               "interleaved.mask");
2881           GroupMask = MaskForGaps
2882                           ? Builder.CreateBinOp(Instruction::And, ShuffledMask,
2883                                                 MaskForGaps)
2884                           : ShuffledMask;
2885         }
2886         NewLoad =
2887             Builder.CreateMaskedLoad(VecTy, AddrParts[Part], Group->getAlign(),
2888                                      GroupMask, PoisonVec, "wide.masked.vec");
2889       }
2890       else
2891         NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
2892                                             Group->getAlign(), "wide.vec");
2893       Group->addMetadata(NewLoad);
2894       NewLoads.push_back(NewLoad);
2895     }
2896 
2897     // For each member in the group, shuffle out the appropriate data from the
2898     // wide loads.
2899     unsigned J = 0;
2900     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2901       Instruction *Member = Group->getMember(I);
2902 
2903       // Skip the gaps in the group.
2904       if (!Member)
2905         continue;
2906 
2907       auto StrideMask =
2908           createStrideMask(I, InterleaveFactor, VF.getKnownMinValue());
2909       for (unsigned Part = 0; Part < UF; Part++) {
2910         Value *StridedVec = Builder.CreateShuffleVector(
2911             NewLoads[Part], StrideMask, "strided.vec");
2912 
2913         // If this member has a different type, cast the result to that type.
2914         if (Member->getType() != ScalarTy) {
2915           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
2916           VectorType *OtherVTy = VectorType::get(Member->getType(), VF);
2917           StridedVec = createBitOrPointerCast(StridedVec, OtherVTy, DL);
2918         }
2919 
2920         if (Group->isReverse())
2921           StridedVec = reverseVector(StridedVec);
2922 
2923         State.set(VPDefs[J], StridedVec, Part);
2924       }
2925       ++J;
2926     }
2927     return;
2928   }
2929 
2930   // The subvector type for the current instruction.
2931   auto *SubVT = VectorType::get(ScalarTy, VF);
2932 
2933   // Vectorize the interleaved store group.
2934   MaskForGaps = createBitMaskForGaps(Builder, VF.getKnownMinValue(), *Group);
2935   assert((!MaskForGaps || useMaskedInterleavedAccesses(*TTI)) &&
2936          "masked interleaved groups are not allowed.");
2937   assert((!MaskForGaps || !VF.isScalable()) &&
2938          "masking gaps for scalable vectors is not yet supported.");
2939   for (unsigned Part = 0; Part < UF; Part++) {
2940     // Collect the stored vector from each member.
2941     SmallVector<Value *, 4> StoredVecs;
2942     for (unsigned i = 0; i < InterleaveFactor; i++) {
2943       assert((Group->getMember(i) || MaskForGaps) &&
2944              "Fail to get a member from an interleaved store group");
2945       Instruction *Member = Group->getMember(i);
2946 
2947       // Skip the gaps in the group.
2948       if (!Member) {
2949         Value *Undef = PoisonValue::get(SubVT);
2950         StoredVecs.push_back(Undef);
2951         continue;
2952       }
2953 
2954       Value *StoredVec = State.get(StoredValues[i], Part);
2955 
2956       if (Group->isReverse())
2957         StoredVec = reverseVector(StoredVec);
2958 
2959       // If this member has a different type, cast it to the unified type.
2961       if (StoredVec->getType() != SubVT)
2962         StoredVec = createBitOrPointerCast(StoredVec, SubVT, DL);
2963 
2964       StoredVecs.push_back(StoredVec);
2965     }
2966 
2967     // Concatenate all vectors into a wide vector.
2968     Value *WideVec = concatenateVectors(Builder, StoredVecs);
2969 
2970     // Interleave the elements in the wide vector.
2971     Value *IVec = Builder.CreateShuffleVector(
2972         WideVec, createInterleaveMask(VF.getKnownMinValue(), InterleaveFactor),
2973         "interleaved.vec");
2974 
2975     Instruction *NewStoreInstr;
2976     if (BlockInMask || MaskForGaps) {
2977       Value *GroupMask = MaskForGaps;
2978       if (BlockInMask) {
2979         Value *BlockInMaskPart = State.get(BlockInMask, Part);
2980         Value *ShuffledMask = Builder.CreateShuffleVector(
2981             BlockInMaskPart,
2982             createReplicatedMask(InterleaveFactor, VF.getKnownMinValue()),
2983             "interleaved.mask");
2984         GroupMask = MaskForGaps ? Builder.CreateBinOp(Instruction::And,
2985                                                       ShuffledMask, MaskForGaps)
2986                                 : ShuffledMask;
2987       }
2988       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
2989                                                 Group->getAlign(), GroupMask);
2990     } else
2991       NewStoreInstr =
2992           Builder.CreateAlignedStore(IVec, AddrParts[Part], Group->getAlign());
2993 
2994     Group->addMetadata(NewStoreInstr);
2995   }
2996 }
2997 
2998 void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr,
2999                                                VPReplicateRecipe *RepRecipe,
3000                                                const VPIteration &Instance,
3001                                                bool IfPredicateInstr,
3002                                                VPTransformState &State) {
3003   assert(!Instr->getType()->isAggregateType() && "Can't handle vectors");
3004 
3005   // llvm.experimental.noalias.scope.decl intrinsics must only be duplicated for
3006   // the first lane and part.
3007   if (isa<NoAliasScopeDeclInst>(Instr))
3008     if (!Instance.isFirstIteration())
3009       return;
3010 
3011   setDebugLocFromInst(Instr);
3012 
3013   // Does this instruction return a value?
3014   bool IsVoidRetTy = Instr->getType()->isVoidTy();
3015 
3016   Instruction *Cloned = Instr->clone();
3017   if (!IsVoidRetTy)
3018     Cloned->setName(Instr->getName() + ".cloned");
3019 
3020   // If the scalarized instruction contributes to the address computation of a
3021   // widened masked load/store which was in a basic block that needed
3022   // predication and is not predicated after vectorization, we can't propagate
3023   // poison-generating flags (nuw/nsw, exact, inbounds, etc.). The scalarized
3024   // instruction could feed a poison value to the base address of the widened
3025   // load/store.
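  // For example, a cloned 'getelementptr inbounds' whose only original use was
  // the address of such a masked load would have 'inbounds' stripped here
  // (an illustrative sketch of the effect of dropping the flags).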
3026   if (State.MayGeneratePoisonRecipes.count(RepRecipe) > 0)
3027     Cloned->dropPoisonGeneratingFlags();
3028 
3029   State.Builder.SetInsertPoint(Builder.GetInsertBlock(),
3030                                Builder.GetInsertPoint());
3031   // Replace the operands of the cloned instructions with their scalar
3032   // equivalents in the new loop.
3033   for (unsigned op = 0, e = RepRecipe->getNumOperands(); op != e; ++op) {
3034     auto *Operand = dyn_cast<Instruction>(Instr->getOperand(op));
3035     auto InputInstance = Instance;
3036     if (!Operand || !OrigLoop->contains(Operand) ||
3037         (Cost->isUniformAfterVectorization(Operand, State.VF)))
3038       InputInstance.Lane = VPLane::getFirstLane();
3039     auto *NewOp = State.get(RepRecipe->getOperand(op), InputInstance);
3040     Cloned->setOperand(op, NewOp);
3041   }
3042   addNewMetadata(Cloned, Instr);
3043 
3044   // Place the cloned scalar in the new loop.
3045   Builder.Insert(Cloned);
3046 
3047   State.set(RepRecipe, Cloned, Instance);
3048 
3049   // If we just cloned a new assumption, add it to the assumption cache.
3050   if (auto *II = dyn_cast<AssumeInst>(Cloned))
3051     AC->registerAssumption(II);
3052 
3053   // End if-block.
3054   if (IfPredicateInstr)
3055     PredicatedInstructions.push_back(Cloned);
3056 }
3057 
3058 PHINode *InnerLoopVectorizer::createInductionVariable(Loop *L, Value *Start,
3059                                                       Value *End, Value *Step,
3060                                                       Instruction *DL) {
3061   BasicBlock *Header = L->getHeader();
3062   BasicBlock *Latch = L->getLoopLatch();
3063   // As we're just creating this loop, it's possible no latch exists
3064   // yet. If so, use the header, as this will be a single-block loop.
3065   if (!Latch)
3066     Latch = Header;
3067 
3068   IRBuilder<> B(&*Header->getFirstInsertionPt());
3069   Instruction *OldInst = getDebugLocFromInstOrOperands(OldInduction);
3070   setDebugLocFromInst(OldInst, &B);
3071   auto *Induction = B.CreatePHI(Start->getType(), 2, "index");
3072 
3073   B.SetInsertPoint(Latch->getTerminator());
3074   setDebugLocFromInst(OldInst, &B);
3075 
3076   // Create i+1 and fill the PHINode.
3077   //
3078   // If the tail is not folded, we know that End - Start >= Step (either
3079   // statically or through the minimum iteration checks). We also know that both
3080   // Start % Step == 0 and End % Step == 0. We exit the vector loop if %IV +
3081   // %Step == %End. Hence the loop must exit before %IV + %Step overflows
3082   // (unsigned), and we can mark the induction increment as NUW.
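  // The generated skeleton is roughly (a sketch, assuming a zero start value
  // and Step = VF * UF = 8; block names are illustrative):
  //   %index      = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  //   %index.next = add nuw i64 %index, 8
  //   %cmp        = icmp eq i64 %index.next, %end
  //   br i1 %cmp, label %exit, label %vector.body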
3083   Value *Next = B.CreateAdd(Induction, Step, "index.next",
3084                             /*NUW=*/!Cost->foldTailByMasking(), /*NSW=*/false);
3085   Induction->addIncoming(Start, L->getLoopPreheader());
3086   Induction->addIncoming(Next, Latch);
3087   // Create the compare.
3088   Value *ICmp = B.CreateICmpEQ(Next, End);
3089   B.CreateCondBr(ICmp, L->getUniqueExitBlock(), Header);
3090 
3091   // Now we have two terminators. Remove the old one from the block.
3092   Latch->getTerminator()->eraseFromParent();
3093 
3094   return Induction;
3095 }
3096 
3097 Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {
3098   if (TripCount)
3099     return TripCount;
3100 
3101   assert(L && "Create Trip Count for null loop.");
3102   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3103   // Find the loop boundaries.
3104   ScalarEvolution *SE = PSE.getSE();
3105   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
3106   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
3107          "Invalid loop count");
3108 
3109   Type *IdxTy = Legal->getWidestInductionType();
3110   assert(IdxTy && "No type for induction");
3111 
3112   // The exit count might have type i64 while the phi is i32. This can happen
3113   // if we have an induction variable that is sign extended before the compare.
3114   // The only way we can get a backedge-taken count in that case is if the
3115   // induction variable was signed, and as such it will not overflow. In such
3116   // a case truncation is legal.
3117   if (SE->getTypeSizeInBits(BackedgeTakenCount->getType()) >
3118       IdxTy->getPrimitiveSizeInBits())
3119     BackedgeTakenCount = SE->getTruncateOrNoop(BackedgeTakenCount, IdxTy);
3120   BackedgeTakenCount = SE->getNoopOrZeroExtend(BackedgeTakenCount, IdxTy);
3121 
3122   // Get the total trip count from the count by adding 1.
3123   const SCEV *ExitCount = SE->getAddExpr(
3124       BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
3125 
3126   const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
3127 
3128   // Expand the trip count and place the new instructions in the preheader.
3129   // Notice that the pre-header does not change, only the loop body.
3130   SCEVExpander Exp(*SE, DL, "induction");
3131 
3132   // Count holds the overall loop count (N).
3133   TripCount = Exp.expandCodeFor(ExitCount, ExitCount->getType(),
3134                                 L->getLoopPreheader()->getTerminator());
3135 
3136   if (TripCount->getType()->isPointerTy())
3137     TripCount =
3138         CastInst::CreatePointerCast(TripCount, IdxTy, "exitcount.ptrcnt.to.int",
3139                                     L->getLoopPreheader()->getTerminator());
3140 
3141   return TripCount;
3142 }
3143 
3144 Value *InnerLoopVectorizer::getOrCreateVectorTripCount(Loop *L) {
3145   if (VectorTripCount)
3146     return VectorTripCount;
3147 
3148   Value *TC = getOrCreateTripCount(L);
3149   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
3150 
3151   Type *Ty = TC->getType();
3152   // This is where we can make the step a runtime constant.
3153   Value *Step = createStepForVF(Builder, Ty, VF, UF);
3154 
3155   // If the tail is to be folded by masking, round the number of iterations N
3156   // up to a multiple of Step instead of rounding down. This is done by first
3157   // adding Step-1 and then rounding down. Note that it's ok if this addition
3158   // overflows: the vector induction variable will eventually wrap to zero given
3159   // that it starts at zero and its Step is a power of two; the loop will then
3160   // exit, with the last early-exit vector comparison also producing all-true.
3161   if (Cost->foldTailByMasking()) {
3162     assert(isPowerOf2_32(VF.getKnownMinValue() * UF) &&
3163            "VF*UF must be a power of 2 when folding tail by masking");
3164     assert(!VF.isScalable() &&
3165            "Tail folding not yet supported for scalable vectors");
3166     TC = Builder.CreateAdd(
3167         TC, ConstantInt::get(Ty, VF.getKnownMinValue() * UF - 1), "n.rnd.up");
3168   }
3169 
3170   // Now we need to generate the expression for the part of the loop that the
3171   // vectorized body will execute. This is equal to N - (N % Step) if scalar
3172   // iterations are not required for correctness, or N - Step, otherwise. Step
3173   // is equal to the vectorization factor (number of SIMD elements) times the
3174   // unroll factor (number of SIMD instructions).
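  // E.g. with N = 10, VF = 4 and UF = 1 (Step = 4): n.mod.vf = 2 and n.vec = 8,
  // leaving 2 iterations for the scalar loop. When folding the tail, N is first
  // rounded up to 13, giving n.vec = 12, and the masked vector loop covers all
  // iterations (an illustrative sketch).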
3175   Value *R = Builder.CreateURem(TC, Step, "n.mod.vf");
3176 
3177   // There are cases where we *must* run at least one iteration in the remainder
3178   // loop.  See the cost model for when this can happen.  If the step evenly
3179   // divides the trip count, we set the remainder to be equal to the step. If
3180   // the step does not evenly divide the trip count, no adjustment is necessary
3181   // since there will already be scalar iterations. Note that the minimum
3182   // iterations check ensures that N >= Step.
3183   if (Cost->requiresScalarEpilogue(VF)) {
3184     auto *IsZero = Builder.CreateICmpEQ(R, ConstantInt::get(R->getType(), 0));
3185     R = Builder.CreateSelect(IsZero, Step, R);
3186   }
3187 
3188   VectorTripCount = Builder.CreateSub(TC, R, "n.vec");
3189 
3190   return VectorTripCount;
3191 }
3192 
3193 Value *InnerLoopVectorizer::createBitOrPointerCast(Value *V, VectorType *DstVTy,
3194                                                    const DataLayout &DL) {
3195   // Verify that V is a vector type with the same number of elements as DstVTy.
3196   auto *DstFVTy = cast<FixedVectorType>(DstVTy);
3197   unsigned VF = DstFVTy->getNumElements();
3198   auto *SrcVecTy = cast<FixedVectorType>(V->getType());
3199   assert((VF == SrcVecTy->getNumElements()) && "Vector dimensions do not match");
3200   Type *SrcElemTy = SrcVecTy->getElementType();
3201   Type *DstElemTy = DstFVTy->getElementType();
3202   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
3203          "Vector elements must have same size");
3204 
3205   // Do a direct cast if element types are castable.
3206   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
3207     return Builder.CreateBitOrPointerCast(V, DstFVTy);
3208   }
3209   // V cannot be directly cast to the desired vector type. This may happen
3210   // when V is a floating point vector but DstVTy is a vector of pointers, or
3211   // vice-versa. Handle this with a two-step cast through an intermediate
3212   // integer type, i.e. Ptr <-> Int <-> Float.
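  // E.g. casting a <4 x float> to a <4 x i8*> (a sketch, assuming 32-bit
  // pointers) first bitcasts to <4 x i32> and then converts the integer vector
  // to the pointer vector.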
3213   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
3214          "Only one type should be a pointer type");
3215   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
3216          "Only one type should be a floating point type");
3217   Type *IntTy =
3218       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
3219   auto *VecIntTy = FixedVectorType::get(IntTy, VF);
3220   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
3221   return Builder.CreateBitOrPointerCast(CastVal, DstFVTy);
3222 }
3223 
3224 void InnerLoopVectorizer::emitMinimumIterationCountCheck(Loop *L,
3225                                                          BasicBlock *Bypass) {
3226   Value *Count = getOrCreateTripCount(L);
3227   // Reuse existing vector loop preheader for TC checks.
3228   // Note that new preheader block is generated for vector loop.
3229   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
3230   IRBuilder<> Builder(TCCheckBlock->getTerminator());
3231 
3232   // Generate code to check if the loop's trip count is less than VF * UF, or
3233   // equal to it in case a scalar epilogue is required; this implies that the
3234   // vector trip count is zero. This check also covers the case where adding one
3235   // to the backedge-taken count overflowed, leading to an incorrect trip count
3236   // of zero. In this case we will also jump to the scalar loop.
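  // For instance, with VF = 4 and UF = 2 we branch to the scalar loop when the
  // trip count is < 8 (or <= 8 when a scalar epilogue is required); this is an
  // illustrative sketch assuming the tail is not folded.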
3237   auto P = Cost->requiresScalarEpilogue(VF) ? ICmpInst::ICMP_ULE
3238                                             : ICmpInst::ICMP_ULT;
3239 
3240   // If tail is to be folded, vector loop takes care of all iterations.
3241   Value *CheckMinIters = Builder.getFalse();
3242   if (!Cost->foldTailByMasking()) {
3243     Value *Step = createStepForVF(Builder, Count->getType(), VF, UF);
3244     CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
3245   }
3246   // Create new preheader for vector loop.
3247   LoopVectorPreHeader =
3248       SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(), DT, LI, nullptr,
3249                  "vector.ph");
3250 
3251   assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
3252                                DT->getNode(Bypass)->getIDom()) &&
3253          "TC check is expected to dominate Bypass");
3254 
3255   // Update dominator for Bypass & LoopExit (if needed).
3256   DT->changeImmediateDominator(Bypass, TCCheckBlock);
3257   if (!Cost->requiresScalarEpilogue(VF))
3258     // If there is an epilogue which must run, there's no edge from the
3259     // middle block to exit blocks, and thus no need to update the immediate
3260     // dominator of the exit blocks.
3261     DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
3262 
3263   ReplaceInstWithInst(
3264       TCCheckBlock->getTerminator(),
3265       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
3266   LoopBypassBlocks.push_back(TCCheckBlock);
3267 }
3268 
3269 BasicBlock *InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
3270 
3271   BasicBlock *const SCEVCheckBlock =
3272       RTChecks.emitSCEVChecks(L, Bypass, LoopVectorPreHeader, LoopExitBlock);
3273   if (!SCEVCheckBlock)
3274     return nullptr;
3275 
3276   assert(!(SCEVCheckBlock->getParent()->hasOptSize() ||
3277            (OptForSizeBasedOnProfile &&
3278             Cost->Hints->getForce() != LoopVectorizeHints::FK_Enabled)) &&
3279          "Cannot SCEV check stride or overflow when optimizing for size");
3281 
3282   // Update dominator only if this is the first RT check.
3283   if (LoopBypassBlocks.empty()) {
3284     DT->changeImmediateDominator(Bypass, SCEVCheckBlock);
3285     if (!Cost->requiresScalarEpilogue(VF))
3286       // If there is an epilogue which must run, there's no edge from the
3287       // middle block to exit blocks, and thus no need to update the immediate
3288       // dominator of the exit blocks.
3289       DT->changeImmediateDominator(LoopExitBlock, SCEVCheckBlock);
3290   }
3291 
3292   LoopBypassBlocks.push_back(SCEVCheckBlock);
3293   AddedSafetyChecks = true;
3294   return SCEVCheckBlock;
3295 }
3296 
3297 BasicBlock *InnerLoopVectorizer::emitMemRuntimeChecks(Loop *L,
3298                                                       BasicBlock *Bypass) {
3299   // VPlan-native path does not do any analysis for runtime checks currently.
3300   if (EnableVPlanNativePath)
3301     return nullptr;
3302 
3303   BasicBlock *const MemCheckBlock =
3304       RTChecks.emitMemRuntimeChecks(L, Bypass, LoopVectorPreHeader);
3305 
3306   // Check if we generated code that checks at runtime whether arrays overlap.
3307   // We put the checks into a separate block to make the more common case of
3308   // few elements faster.
3309   if (!MemCheckBlock)
3310     return nullptr;
3311 
3312   if (MemCheckBlock->getParent()->hasOptSize() || OptForSizeBasedOnProfile) {
3313     assert(Cost->Hints->getForce() == LoopVectorizeHints::FK_Enabled &&
3314            "Cannot emit memory checks when optimizing for size, unless forced "
3315            "to vectorize.");
3316     ORE->emit([&]() {
3317       return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationCodeSize",
3318                                         L->getStartLoc(), L->getHeader())
3319              << "Code-size may be reduced by not forcing "
3320                 "vectorization, or by source-code modifications "
3321                 "eliminating the need for runtime checks "
3322                 "(e.g., adding 'restrict').";
3323     });
3324   }
3325 
3326   LoopBypassBlocks.push_back(MemCheckBlock);
3327 
3328   AddedSafetyChecks = true;
3329 
3330   // We currently don't use LoopVersioning for the actual loop cloning but we
3331   // still use it to add the noalias metadata.
3332   LVer = std::make_unique<LoopVersioning>(
3333       *Legal->getLAI(),
3334       Legal->getLAI()->getRuntimePointerChecking()->getChecks(), OrigLoop, LI,
3335       DT, PSE.getSE());
3336   LVer->prepareNoAliasMetadata();
3337   return MemCheckBlock;
3338 }
3339 
3340 Value *InnerLoopVectorizer::emitTransformedIndex(
3341     IRBuilder<> &B, Value *Index, ScalarEvolution *SE, const DataLayout &DL,
3342     const InductionDescriptor &ID) const {
3343 
3344   SCEVExpander Exp(*SE, DL, "induction");
3345   auto Step = ID.getStep();
3346   auto StartValue = ID.getStartValue();
3347   assert(Index->getType()->getScalarType() == Step->getType() &&
3348          "Index scalar type does not match StepValue type");
3349 
3350   // Note: the IR at this point is broken. We cannot use SE to create any new
3351   // SCEV and then expand it, hoping that SCEV's simplification will give us
3352   // more optimal code. Unfortunately, attempting to do so on invalid IR may
3353   // lead to various SCEV crashes. So all we can do is use the builder and
3354   // rely on InstCombine for future simplifications. Here we handle only some
3355   // trivial cases.
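  // Conceptually the transformed index is StartValue op (Index * Step):
  //   integer induction:  Start + Index * Step
  //   pointer induction:  &Start[Index * Step]
  //   FP induction:       Start fadd/fsub (Index * Step)
  // (a sketch of the cases handled by the switch below).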
3356   auto CreateAdd = [&B](Value *X, Value *Y) {
3357     assert(X->getType() == Y->getType() && "Types don't match!");
3358     if (auto *CX = dyn_cast<ConstantInt>(X))
3359       if (CX->isZero())
3360         return Y;
3361     if (auto *CY = dyn_cast<ConstantInt>(Y))
3362       if (CY->isZero())
3363         return X;
3364     return B.CreateAdd(X, Y);
3365   };
3366 
3367   // We allow X to be a vector type, in which case Y will potentially be
3368   // splatted into a vector with the same element count.
3369   auto CreateMul = [&B](Value *X, Value *Y) {
3370     assert(X->getType()->getScalarType() == Y->getType() &&
3371            "Types don't match!");
3372     if (auto *CX = dyn_cast<ConstantInt>(X))
3373       if (CX->isOne())
3374         return Y;
3375     if (auto *CY = dyn_cast<ConstantInt>(Y))
3376       if (CY->isOne())
3377         return X;
3378     VectorType *XVTy = dyn_cast<VectorType>(X->getType());
3379     if (XVTy && !isa<VectorType>(Y->getType()))
3380       Y = B.CreateVectorSplat(XVTy->getElementCount(), Y);
3381     return B.CreateMul(X, Y);
3382   };
3383 
3384   // Get a suitable insert point for SCEV expansion. For blocks in the vector
3385   // loop, choose the end of the vector loop header (=LoopVectorBody), because
3386   // the DomTree is not kept up-to-date for additional blocks generated in the
3387   // vector loop. By using the header as insertion point, we guarantee that the
3388   // expanded instructions dominate all their uses.
3389   auto GetInsertPoint = [this, &B]() {
3390     BasicBlock *InsertBB = B.GetInsertPoint()->getParent();
3391     if (InsertBB != LoopVectorBody &&
3392         LI->getLoopFor(LoopVectorBody) == LI->getLoopFor(InsertBB))
3393       return LoopVectorBody->getTerminator();
3394     return &*B.GetInsertPoint();
3395   };
3396 
3397   switch (ID.getKind()) {
3398   case InductionDescriptor::IK_IntInduction: {
3399     assert(!isa<VectorType>(Index->getType()) &&
3400            "Vector indices not supported for integer inductions yet");
3401     assert(Index->getType() == StartValue->getType() &&
3402            "Index type does not match StartValue type");
3403     if (ID.getConstIntStepValue() && ID.getConstIntStepValue()->isMinusOne())
3404       return B.CreateSub(StartValue, Index);
3405     auto *Offset = CreateMul(
3406         Index, Exp.expandCodeFor(Step, Index->getType(), GetInsertPoint()));
3407     return CreateAdd(StartValue, Offset);
3408   }
3409   case InductionDescriptor::IK_PtrInduction: {
3410     assert(isa<SCEVConstant>(Step) &&
3411            "Expected constant step for pointer induction");
3412     return B.CreateGEP(
3413         ID.getElementType(), StartValue,
3414         CreateMul(Index,
3415                   Exp.expandCodeFor(Step, Index->getType()->getScalarType(),
3416                                     GetInsertPoint())));
3417   }
3418   case InductionDescriptor::IK_FpInduction: {
3419     assert(!isa<VectorType>(Index->getType()) &&
3420            "Vector indices not supported for FP inductions yet");
3421     assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
3422     auto InductionBinOp = ID.getInductionBinOp();
3423     assert(InductionBinOp &&
3424            (InductionBinOp->getOpcode() == Instruction::FAdd ||
3425             InductionBinOp->getOpcode() == Instruction::FSub) &&
3426            "Original bin op should be defined for FP induction");
3427 
3428     Value *StepValue = cast<SCEVUnknown>(Step)->getValue();
3429     Value *MulExp = B.CreateFMul(StepValue, Index);
3430     return B.CreateBinOp(InductionBinOp->getOpcode(), StartValue, MulExp,
3431                          "induction");
3432   }
3433   case InductionDescriptor::IK_NoInduction:
3434     return nullptr;
3435   }
3436   llvm_unreachable("invalid enum");
3437 }
3438 
3439 Loop *InnerLoopVectorizer::createVectorLoopSkeleton(StringRef Prefix) {
3440   LoopScalarBody = OrigLoop->getHeader();
3441   LoopVectorPreHeader = OrigLoop->getLoopPreheader();
3442   assert(LoopVectorPreHeader && "Invalid loop structure");
3443   LoopExitBlock = OrigLoop->getUniqueExitBlock(); // may be nullptr
3444   assert((LoopExitBlock || Cost->requiresScalarEpilogue(VF)) &&
3445          "multiple exit loop without required epilogue?");
3446 
3447   LoopMiddleBlock =
3448       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3449                  LI, nullptr, Twine(Prefix) + "middle.block");
3450   LoopScalarPreHeader =
3451       SplitBlock(LoopMiddleBlock, LoopMiddleBlock->getTerminator(), DT, LI,
3452                  nullptr, Twine(Prefix) + "scalar.ph");
3453 
3454   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3455 
3456   // Set up the middle block terminator.  Two cases:
3457   // 1) If we know that we must execute the scalar epilogue, emit an
3458   //    unconditional branch.
3459   // 2) Otherwise, we must have a single unique exit block (due to how we
3460   //    implement the multiple exit case).  In this case, set up a conditional
3461   //    branch from the middle block to the loop scalar preheader, and the
3462   //    exit block.  completeLoopSkeleton will update the condition to use an
3463   //    iteration check, if required to decide whether to execute the remainder.
3464   BranchInst *BrInst = Cost->requiresScalarEpilogue(VF) ?
3465     BranchInst::Create(LoopScalarPreHeader) :
3466     BranchInst::Create(LoopExitBlock, LoopScalarPreHeader,
3467                        Builder.getTrue());
3468   BrInst->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3469   ReplaceInstWithInst(LoopMiddleBlock->getTerminator(), BrInst);
3470 
3471   // We intentionally don't let SplitBlock update LoopInfo since LoopVectorBody
3472   // should belong to a different loop than LoopVectorPreHeader. LoopVectorBody
3473   // is explicitly added to the correct place a few lines later.
3474   LoopVectorBody =
3475       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
3476                  nullptr, nullptr, Twine(Prefix) + "vector.body");
3477 
3478   // Update dominator for loop exit.
3479   if (!Cost->requiresScalarEpilogue(VF))
3480     // If there is an epilogue which must run, there's no edge from the
3481     // middle block to exit blocks, and thus no need to update the immediate
3482     // dominator of the exit blocks.
3483     DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
3484 
3485   // Create and register the new vector loop.
3486   Loop *Lp = LI->AllocateLoop();
3487   Loop *ParentLoop = OrigLoop->getParentLoop();
3488 
3489   // Insert the new loop into the loop nest and register the new basic blocks
3490   // before calling any utilities such as SCEV that require valid LoopInfo.
3491   if (ParentLoop) {
3492     ParentLoop->addChildLoop(Lp);
3493   } else {
3494     LI->addTopLevelLoop(Lp);
3495   }
3496   Lp->addBasicBlockToLoop(LoopVectorBody, *LI);
3497   return Lp;
3498 }
3499 
3500 void InnerLoopVectorizer::createInductionResumeValues(
3501     Loop *L, Value *VectorTripCount,
3502     std::pair<BasicBlock *, Value *> AdditionalBypass) {
3503   assert(VectorTripCount && L && "Expected valid arguments");
3504   assert(((AdditionalBypass.first && AdditionalBypass.second) ||
3505           (!AdditionalBypass.first && !AdditionalBypass.second)) &&
3506          "Inconsistent information about additional bypass.");
3507   // We are going to resume the execution of the scalar loop.
3508   // Go over all of the induction variables that we found and fix the
3509   // PHIs that are left in the scalar version of the loop.
3510   // The starting values of PHI nodes depend on the counter of the last
3511   // iteration in the vectorized loop.
3512   // If we come from a bypass edge then we need to start from the original
3513   // start value.
3514   for (auto &InductionEntry : Legal->getInductionVars()) {
3515     PHINode *OrigPhi = InductionEntry.first;
3516     InductionDescriptor II = InductionEntry.second;
3517 
3518     // Create phi nodes to merge from the backedge-taken check block.
3519     PHINode *BCResumeVal =
3520         PHINode::Create(OrigPhi->getType(), 3, "bc.resume.val",
3521                         LoopScalarPreHeader->getTerminator());
3522     // Copy original phi DL over to the new one.
3523     BCResumeVal->setDebugLoc(OrigPhi->getDebugLoc());
3524     Value *&EndValue = IVEndValues[OrigPhi];
3525     Value *EndValueFromAdditionalBypass = AdditionalBypass.second;
3526     if (OrigPhi == OldInduction) {
3527       // We know what the end value is.
3528       EndValue = VectorTripCount;
3529     } else {
3530       IRBuilder<> B(L->getLoopPreheader()->getTerminator());
3531 
3532       // Fast-math-flags propagate from the original induction instruction.
3533       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3534         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3535 
3536       Type *StepType = II.getStep()->getType();
3537       Instruction::CastOps CastOp =
3538           CastInst::getCastOpcode(VectorTripCount, true, StepType, true);
3539       Value *CRD = B.CreateCast(CastOp, VectorTripCount, StepType, "cast.crd");
3540       const DataLayout &DL = LoopScalarBody->getModule()->getDataLayout();
3541       EndValue = emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3542       EndValue->setName("ind.end");
3543 
3544       // Compute the end value for the additional bypass (if applicable).
3545       if (AdditionalBypass.first) {
3546         B.SetInsertPoint(&(*AdditionalBypass.first->getFirstInsertionPt()));
3547         CastOp = CastInst::getCastOpcode(AdditionalBypass.second, true,
3548                                          StepType, true);
3549         CRD =
3550             B.CreateCast(CastOp, AdditionalBypass.second, StepType, "cast.crd");
3551         EndValueFromAdditionalBypass =
3552             emitTransformedIndex(B, CRD, PSE.getSE(), DL, II);
3553         EndValueFromAdditionalBypass->setName("ind.end");
3554       }
3555     }
3556     // The new PHI merges the original incoming value, in case of a bypass,
3557     // or the value at the end of the vectorized loop.
3558     BCResumeVal->addIncoming(EndValue, LoopMiddleBlock);
3559 
3560     // Fix the scalar body counter (PHI node).
3561     // The old induction's phi node in the scalar body needs the truncated
3562     // value.
3563     for (BasicBlock *BB : LoopBypassBlocks)
3564       BCResumeVal->addIncoming(II.getStartValue(), BB);
3565 
3566     if (AdditionalBypass.first)
3567       BCResumeVal->setIncomingValueForBlock(AdditionalBypass.first,
3568                                             EndValueFromAdditionalBypass);
3569 
3570     OrigPhi->setIncomingValueForBlock(LoopScalarPreHeader, BCResumeVal);
3571   }
3572 }
3573 
3574 BasicBlock *InnerLoopVectorizer::completeLoopSkeleton(Loop *L,
3575                                                       MDNode *OrigLoopID) {
3576   assert(L && "Expected valid loop.");
3577 
3578   // The trip counts should be cached by now.
3579   Value *Count = getOrCreateTripCount(L);
3580   Value *VectorTripCount = getOrCreateVectorTripCount(L);
3581 
3582   auto *ScalarLatchTerm = OrigLoop->getLoopLatch()->getTerminator();
3583 
3584   // Add a check in the middle block to see if we have completed
3585   // all of the iterations in the first vector loop.  Three cases:
3586   // 1) If we require a scalar epilogue, there is no conditional branch as
3587   //    we unconditionally branch to the scalar preheader.  Do nothing.
3588   // 2) If (N - N%VF) == N, then we *don't* need to run the remainder.
3589   //    Thus if tail is to be folded, we know we don't need to run the
3590   //    remainder and we can use the previous value for the condition (true).
3591   // 3) Otherwise, construct a runtime check.
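       // Illustrative example (values assumed, not taken from a particular test):
       // with a trip count N = 100 and VF * UF = 8, the vector loop covers
       // VectorTripCount = 96 iterations, so the check below becomes
       //   %cmp.n = icmp eq i64 100, 96
       // which is false, and the branch falls through to the scalar preheader so
       // the remainder loop runs the last 4 iterations.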
3592   if (!Cost->requiresScalarEpilogue(VF) && !Cost->foldTailByMasking()) {
3593     Instruction *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
3594                                         Count, VectorTripCount, "cmp.n",
3595                                         LoopMiddleBlock->getTerminator());
3596 
3597     // Here we use the same DebugLoc as the scalar loop latch terminator instead
3598     // of the corresponding compare because they may have ended up with
3599     // different line numbers and we want to avoid awkward line stepping while
3600     // debugging. E.g., if the compare has a line number inside the loop.
3601     CmpN->setDebugLoc(ScalarLatchTerm->getDebugLoc());
3602     cast<BranchInst>(LoopMiddleBlock->getTerminator())->setCondition(CmpN);
3603   }
3604 
3605   // Get ready to start creating new instructions into the vectorized body.
3606   assert(LoopVectorPreHeader == L->getLoopPreheader() &&
3607          "Inconsistent vector loop preheader");
3608   Builder.SetInsertPoint(&*LoopVectorBody->getFirstInsertionPt());
3609 
3610   Optional<MDNode *> VectorizedLoopID =
3611       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
3612                                       LLVMLoopVectorizeFollowupVectorized});
3613   if (VectorizedLoopID.hasValue()) {
3614     L->setLoopID(VectorizedLoopID.getValue());
3615 
3616     // Do not setAlreadyVectorized if loop attributes have been defined
3617     // explicitly.
3618     return LoopVectorPreHeader;
3619   }
3620 
3621   // Keep all loop hints from the original loop on the vector loop (we'll
3622   // replace the vectorizer-specific hints below).
3623   if (MDNode *LID = OrigLoop->getLoopID())
3624     L->setLoopID(LID);
3625 
3626   LoopVectorizeHints Hints(L, true, *ORE);
3627   Hints.setAlreadyVectorized();
3628 
3629 #ifdef EXPENSIVE_CHECKS
3630   assert(DT->verify(DominatorTree::VerificationLevel::Fast));
3631   LI->verify(*DT);
3632 #endif
3633 
3634   return LoopVectorPreHeader;
3635 }
3636 
3637 BasicBlock *InnerLoopVectorizer::createVectorizedLoopSkeleton() {
3638   /*
3639    In this function we generate a new loop. The new loop will contain
3640    the vectorized instructions while the old loop will continue to run the
3641    scalar remainder.
3642 
3643        [ ] <-- loop iteration number check.
3644     /   |
3645    /    v
3646   |    [ ] <-- vector loop bypass (may consist of multiple blocks).
3647   |  /  |
3648   | /   v
3649   ||   [ ]     <-- vector pre header.
3650   |/    |
3651   |     v
3652   |    [  ] \
3653   |    [  ]_|   <-- vector loop.
3654   |     |
3655   |     v
3656   \   -[ ]   <--- middle-block.
3657    \/   |
3658    /\   v
3659    | ->[ ]     <--- new preheader.
3660    |    |
3661  (opt)  v      <-- edge from middle to exit iff epilogue is not required.
3662    |   [ ] \
3663    |   [ ]_|   <-- old scalar loop to handle remainder (scalar epilogue).
3664     \   |
3665      \  v
3666       >[ ]     <-- exit block(s).
3667    ...
3668    */
3669 
3670   // Get the metadata of the original loop before it gets modified.
3671   MDNode *OrigLoopID = OrigLoop->getLoopID();
3672 
3673   // Workaround!  Compute the trip count of the original loop and cache it
3674   // before we start modifying the CFG.  This code has a systemic problem
3675   // wherein it tries to run analysis over partially constructed IR; this is
3676   // wrong, and not simply for SCEV.  The trip count of the original loop
3677   // simply happens to be prone to hitting this in practice.  In theory, we
3678   // can hit the same issue for any SCEV, or ValueTracking query done during
3679   // mutation.  See PR49900.
3680   getOrCreateTripCount(OrigLoop);
3681 
3682   // Create an empty vector loop, and prepare basic blocks for the runtime
3683   // checks.
3684   Loop *Lp = createVectorLoopSkeleton("");
3685 
3686   // Now, compare the new count to zero. If it is zero, skip the vector loop and
3687   // jump to the scalar loop. This check also covers the case where the
3688   // backedge-taken count is uint##_max: adding one to it will overflow leading
3689   // to an incorrect trip count of zero. In this (rare) case we will also jump
3690   // to the scalar loop.
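       // For illustration (assuming a hypothetical i8 count): a backedge-taken
       // count of 255 gives a trip count of 255 + 1, which wraps to 0, so the
       // check emitted below sends control to the scalar loop instead of running
       // the vector loop with a bogus count.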
3691   emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader);
3692 
3693   // Generate the code to check any assumptions that we've made for SCEV
3694   // expressions.
3695   emitSCEVChecks(Lp, LoopScalarPreHeader);
3696 
3697   // Generate the code that checks in runtime if arrays overlap. We put the
3698   // checks into a separate block to make the more common case of few elements
3699   // faster.
3700   emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
3701 
3702   // Some loops have a single integer induction variable, while other loops
3703   // don't. One example is C++ iterators, which often have multiple pointer
3704   // induction variables. In the code below we also support a case where we
3705   // don't have a single induction variable.
3706   //
3707   // We try as hard as possible to obtain an induction variable from the
3708   // original loop. However, if we don't find one that:
3709   //   - is an integer
3710   //   - counts from zero, stepping by one
3711   //   - is the size of the widest induction variable type
3712   // then we create a new one.
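       // For example (hypothetical shorthand): a loop whose only induction is the
       // pointer phi 'p = phi [ %a, %ph ], [ %p.next, %latch ]' has no suitable
       // integer induction, so a fresh canonical one counting 0, 1, 2, ... in the
       // widest induction type is created below for the vector loop.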
3713   OldInduction = Legal->getPrimaryInduction();
3714   Type *IdxTy = Legal->getWidestInductionType();
3715   Value *StartIdx = ConstantInt::get(IdxTy, 0);
3716   // The loop step is equal to the vectorization factor (num of SIMD elements)
3717   // times the unroll factor (num of SIMD instructions).
3718   Builder.SetInsertPoint(&*Lp->getHeader()->getFirstInsertionPt());
3719   Value *Step = createStepForVF(Builder, IdxTy, VF, UF);
3720   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
3721   Induction =
3722       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
3723                               getDebugLocFromInstOrOperands(OldInduction));
3724 
3725   // Emit phis for the new starting index of the scalar loop.
3726   createInductionResumeValues(Lp, CountRoundDown);
3727 
3728   return completeLoopSkeleton(Lp, OrigLoopID);
3729 }
3730 
3731 // Fix up external users of the induction variable. At this point, we are
3732 // in LCSSA form, with all external PHIs that use the IV having one input value,
3733 // coming from the remainder loop. We need those PHIs to also have a correct
3734 // value for the IV when arriving directly from the middle block.
3735 void InnerLoopVectorizer::fixupIVUsers(PHINode *OrigPhi,
3736                                        const InductionDescriptor &II,
3737                                        Value *CountRoundDown, Value *EndValue,
3738                                        BasicBlock *MiddleBlock) {
3739   // There are two kinds of external IV usages - those that use the value
3740   // computed in the last iteration (the PHI) and those that use the penultimate
3741   // value (the value that feeds into the phi from the loop latch).
3742   // We allow both, but they obviously have different values.
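       // Shorthand illustration (hypothetical LCSSA phis, not from a real test):
       //   loop:
       //     %iv      = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
       //     %iv.next = add i64 %iv, 1
       //   exit:
       //     %use.last   = phi i64 [ %iv.next, %loop ]  ; wants the last value
       //     %use.penult = phi i64 [ %iv, %loop ]       ; wants the penultimate value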
3743 
3744   assert(OrigLoop->getUniqueExitBlock() && "Expected a single exit block");
3745 
3746   DenseMap<Value *, Value *> MissingVals;
3747 
3748   // An external user of the last iteration's value should see the value that
3749   // the remainder loop uses to initialize its own IV.
3750   Value *PostInc = OrigPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch());
3751   for (User *U : PostInc->users()) {
3752     Instruction *UI = cast<Instruction>(U);
3753     if (!OrigLoop->contains(UI)) {
3754       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3755       MissingVals[UI] = EndValue;
3756     }
3757   }
3758 
3759   // An external user of the penultimate value needs to see EndValue - Step.
3760   // The simplest way to get this is to recompute it from the constituent SCEVs,
3761   // that is Start + (Step * (CRD - 1)).
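       // Worked example (assumed values): with Start = 0, Step = 2 and CRD = 8,
       // the penultimate value is 0 + 2 * (8 - 1) = 14.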
3762   for (User *U : OrigPhi->users()) {
3763     auto *UI = cast<Instruction>(U);
3764     if (!OrigLoop->contains(UI)) {
3765       const DataLayout &DL =
3766           OrigLoop->getHeader()->getModule()->getDataLayout();
3767       assert(isa<PHINode>(UI) && "Expected LCSSA form");
3768 
3769       IRBuilder<> B(MiddleBlock->getTerminator());
3770 
3771       // Fast-math-flags propagate from the original induction instruction.
3772       if (II.getInductionBinOp() && isa<FPMathOperator>(II.getInductionBinOp()))
3773         B.setFastMathFlags(II.getInductionBinOp()->getFastMathFlags());
3774 
3775       Value *CountMinusOne = B.CreateSub(
3776           CountRoundDown, ConstantInt::get(CountRoundDown->getType(), 1));
3777       Value *CMO =
3778           !II.getStep()->getType()->isIntegerTy()
3779               ? B.CreateCast(Instruction::SIToFP, CountMinusOne,
3780                              II.getStep()->getType())
3781               : B.CreateSExtOrTrunc(CountMinusOne, II.getStep()->getType());
3782       CMO->setName("cast.cmo");
3783       Value *Escape = emitTransformedIndex(B, CMO, PSE.getSE(), DL, II);
3784       Escape->setName("ind.escape");
3785       MissingVals[UI] = Escape;
3786     }
3787   }
3788 
3789   for (auto &I : MissingVals) {
3790     PHINode *PHI = cast<PHINode>(I.first);
3791     // One corner case we have to handle is two IVs "chasing" each other,
3792     // that is %IV2 = phi [...], [ %IV1, %latch ]
3793     // In this case, if IV1 has an external use, we need to avoid adding both
3794     // "last value of IV1" and "penultimate value of IV2". So, verify that we
3795     // don't already have an incoming value for the middle block.
3796     if (PHI->getBasicBlockIndex(MiddleBlock) == -1)
3797       PHI->addIncoming(I.second, MiddleBlock);
3798   }
3799 }
3800 
3801 namespace {
3802 
3803 struct CSEDenseMapInfo {
3804   static bool canHandle(const Instruction *I) {
3805     return isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
3806            isa<ShuffleVectorInst>(I) || isa<GetElementPtrInst>(I);
3807   }
3808 
3809   static inline Instruction *getEmptyKey() {
3810     return DenseMapInfo<Instruction *>::getEmptyKey();
3811   }
3812 
3813   static inline Instruction *getTombstoneKey() {
3814     return DenseMapInfo<Instruction *>::getTombstoneKey();
3815   }
3816 
3817   static unsigned getHashValue(const Instruction *I) {
3818     assert(canHandle(I) && "Unknown instruction!");
3819     return hash_combine(I->getOpcode(), hash_combine_range(I->value_op_begin(),
3820                                                            I->value_op_end()));
3821   }
3822 
3823   static bool isEqual(const Instruction *LHS, const Instruction *RHS) {
3824     if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
3825         LHS == getTombstoneKey() || RHS == getTombstoneKey())
3826       return LHS == RHS;
3827     return LHS->isIdenticalTo(RHS);
3828   }
3829 };
3830 
3831 } // end anonymous namespace
3832 
3833 /// Perform CSE of induction variable instructions.
3834 static void cse(BasicBlock *BB) {
3835   // Perform simple cse.
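       // For illustration (hypothetical IR): two identical address computations
       //   %gep1 = getelementptr i32, i32* %base, i64 %idx
       //   %gep2 = getelementptr i32, i32* %base, i64 %idx
       // hash to the same key below, so %gep2 is replaced by %gep1 and erased.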
3836   SmallDenseMap<Instruction *, Instruction *, 4, CSEDenseMapInfo> CSEMap;
3837   for (Instruction &In : llvm::make_early_inc_range(*BB)) {
3838     if (!CSEDenseMapInfo::canHandle(&In))
3839       continue;
3840 
3841     // Check if we can replace this instruction with any of the
3842     // visited instructions.
3843     if (Instruction *V = CSEMap.lookup(&In)) {
3844       In.replaceAllUsesWith(V);
3845       In.eraseFromParent();
3846       continue;
3847     }
3848 
3849     CSEMap[&In] = &In;
3850   }
3851 }
3852 
3853 InstructionCost
3854 LoopVectorizationCostModel::getVectorCallCost(CallInst *CI, ElementCount VF,
3855                                               bool &NeedToScalarize) const {
3856   Function *F = CI->getCalledFunction();
3857   Type *ScalarRetTy = CI->getType();
3858   SmallVector<Type *, 4> Tys, ScalarTys;
3859   for (auto &ArgOp : CI->args())
3860     ScalarTys.push_back(ArgOp->getType());
3861 
3862   // Estimate cost of scalarized vector call. The source operands are assumed
3863   // to be vectors, so we need to extract individual elements from there,
3864   // execute VF scalar calls, and then gather the result into the vector return
3865   // value.
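       // For example (assuming VF = 4): the estimate below is roughly
       //   4 * ScalarCallCost + scalarization overhead,
       // where the overhead covers the per-argument extracts and the inserts that
       // repack the scalar results into the vector return value.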
3866   InstructionCost ScalarCallCost =
3867       TTI.getCallInstrCost(F, ScalarRetTy, ScalarTys, TTI::TCK_RecipThroughput);
3868   if (VF.isScalar())
3869     return ScalarCallCost;
3870 
3871   // Compute corresponding vector type for return value and arguments.
3872   Type *RetTy = ToVectorTy(ScalarRetTy, VF);
3873   for (Type *ScalarTy : ScalarTys)
3874     Tys.push_back(ToVectorTy(ScalarTy, VF));
3875 
3876   // Compute costs of unpacking argument values for the scalar calls and
3877   // packing the return values to a vector.
3878   InstructionCost ScalarizationCost = getScalarizationOverhead(CI, VF);
3879 
3880   InstructionCost Cost =
3881       ScalarCallCost * VF.getKnownMinValue() + ScalarizationCost;
3882 
3883   // If we can't emit a vector call for this function, then the currently found
3884   // cost is the cost we need to return.
3885   NeedToScalarize = true;
3886   VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
3887   Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
3888 
3889   if (!TLI || CI->isNoBuiltin() || !VecFunc)
3890     return Cost;
3891 
3892   // If the corresponding vector cost is cheaper, return its cost.
3893   InstructionCost VectorCallCost =
3894       TTI.getCallInstrCost(nullptr, RetTy, Tys, TTI::TCK_RecipThroughput);
3895   if (VectorCallCost < Cost) {
3896     NeedToScalarize = false;
3897     Cost = VectorCallCost;
3898   }
3899   return Cost;
3900 }
3901 
3902 static Type *MaybeVectorizeType(Type *Elt, ElementCount VF) {
3903   if (VF.isScalar() || (!Elt->isIntOrPtrTy() && !Elt->isFloatingPointTy()))
3904     return Elt;
3905   return VectorType::get(Elt, VF);
3906 }
3907 
3908 InstructionCost
3909 LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
3910                                                    ElementCount VF) const {
3911   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
3912   assert(ID && "Expected intrinsic call!");
3913   Type *RetTy = MaybeVectorizeType(CI->getType(), VF);
3914   FastMathFlags FMF;
3915   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
3916     FMF = FPMO->getFastMathFlags();
3917 
3918   SmallVector<const Value *> Arguments(CI->args());
3919   FunctionType *FTy = CI->getCalledFunction()->getFunctionType();
3920   SmallVector<Type *> ParamTys;
3921   std::transform(FTy->param_begin(), FTy->param_end(),
3922                  std::back_inserter(ParamTys),
3923                  [&](Type *Ty) { return MaybeVectorizeType(Ty, VF); });
3924 
3925   IntrinsicCostAttributes CostAttrs(ID, RetTy, Arguments, ParamTys, FMF,
3926                                     dyn_cast<IntrinsicInst>(CI));
3927   return TTI.getIntrinsicInstrCost(CostAttrs,
3928                                    TargetTransformInfo::TCK_RecipThroughput);
3929 }
3930 
3931 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
3932   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3933   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3934   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
3935 }
3936 
3937 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
3938   auto *I1 = cast<IntegerType>(cast<VectorType>(T1)->getElementType());
3939   auto *I2 = cast<IntegerType>(cast<VectorType>(T2)->getElementType());
3940   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
3941 }
3942 
3943 void InnerLoopVectorizer::truncateToMinimalBitwidths(VPTransformState &State) {
3944   // For every instruction `I` in MinBWs, truncate the operands, create a
3945   // truncated version of `I` and reextend its result. InstCombine runs
3946   // later and will remove any ext/trunc pairs.
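       // For illustration (shorthand IR, assuming a minimal bitwidth of 8 bits):
       //   %add = add <4 x i32> %a, %b
       // becomes
       //   %a.tr   = trunc <4 x i32> %a to <4 x i8>
       //   %b.tr   = trunc <4 x i32> %b to <4 x i8>
       //   %add.tr = add <4 x i8> %a.tr, %b.tr
       //   %res    = zext <4 x i8> %add.tr to <4 x i32>
       // and InstCombine later removes any redundant trunc/zext pairs.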
3947   SmallPtrSet<Value *, 4> Erased;
3948   for (const auto &KV : Cost->getMinimalBitwidths()) {
3949     // If the value wasn't vectorized, we must maintain the original scalar
3950     // type. The absence of the value from State indicates that it
3951     // wasn't vectorized.
3952     // FIXME: Should not rely on getVPValue at this point.
3953     VPValue *Def = State.Plan->getVPValue(KV.first, true);
3954     if (!State.hasAnyVectorValue(Def))
3955       continue;
3956     for (unsigned Part = 0; Part < UF; ++Part) {
3957       Value *I = State.get(Def, Part);
3958       if (Erased.count(I) || I->use_empty() || !isa<Instruction>(I))
3959         continue;
3960       Type *OriginalTy = I->getType();
3961       Type *ScalarTruncatedTy =
3962           IntegerType::get(OriginalTy->getContext(), KV.second);
3963       auto *TruncatedTy = VectorType::get(
3964           ScalarTruncatedTy, cast<VectorType>(OriginalTy)->getElementCount());
3965       if (TruncatedTy == OriginalTy)
3966         continue;
3967 
3968       IRBuilder<> B(cast<Instruction>(I));
3969       auto ShrinkOperand = [&](Value *V) -> Value * {
3970         if (auto *ZI = dyn_cast<ZExtInst>(V))
3971           if (ZI->getSrcTy() == TruncatedTy)
3972             return ZI->getOperand(0);
3973         return B.CreateZExtOrTrunc(V, TruncatedTy);
3974       };
3975 
3976       // The actual instruction modification depends on the instruction type,
3977       // unfortunately.
3978       Value *NewI = nullptr;
3979       if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3980         NewI = B.CreateBinOp(BO->getOpcode(), ShrinkOperand(BO->getOperand(0)),
3981                              ShrinkOperand(BO->getOperand(1)));
3982 
3983         // Any wrapping introduced by shrinking this operation shouldn't be
3984         // considered undefined behavior. So, we can't unconditionally copy
3985         // arithmetic wrapping flags to NewI.
3986         cast<BinaryOperator>(NewI)->copyIRFlags(I, /*IncludeWrapFlags=*/false);
3987       } else if (auto *CI = dyn_cast<ICmpInst>(I)) {
3988         NewI =
3989             B.CreateICmp(CI->getPredicate(), ShrinkOperand(CI->getOperand(0)),
3990                          ShrinkOperand(CI->getOperand(1)));
3991       } else if (auto *SI = dyn_cast<SelectInst>(I)) {
3992         NewI = B.CreateSelect(SI->getCondition(),
3993                               ShrinkOperand(SI->getTrueValue()),
3994                               ShrinkOperand(SI->getFalseValue()));
3995       } else if (auto *CI = dyn_cast<CastInst>(I)) {
3996         switch (CI->getOpcode()) {
3997         default:
3998           llvm_unreachable("Unhandled cast!");
3999         case Instruction::Trunc:
4000           NewI = ShrinkOperand(CI->getOperand(0));
4001           break;
4002         case Instruction::SExt:
4003           NewI = B.CreateSExtOrTrunc(
4004               CI->getOperand(0),
4005               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4006           break;
4007         case Instruction::ZExt:
4008           NewI = B.CreateZExtOrTrunc(
4009               CI->getOperand(0),
4010               smallestIntegerVectorType(OriginalTy, TruncatedTy));
4011           break;
4012         }
4013       } else if (auto *SI = dyn_cast<ShuffleVectorInst>(I)) {
4014         auto Elements0 =
4015             cast<VectorType>(SI->getOperand(0)->getType())->getElementCount();
4016         auto *O0 = B.CreateZExtOrTrunc(
4017             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
4018         auto Elements1 =
4019             cast<VectorType>(SI->getOperand(1)->getType())->getElementCount();
4020         auto *O1 = B.CreateZExtOrTrunc(
4021             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
4022 
4023         NewI = B.CreateShuffleVector(O0, O1, SI->getShuffleMask());
4024       } else if (isa<LoadInst>(I) || isa<PHINode>(I)) {
4025         // Don't do anything with the operands, just extend the result.
4026         continue;
4027       } else if (auto *IE = dyn_cast<InsertElementInst>(I)) {
4028         auto Elements =
4029             cast<VectorType>(IE->getOperand(0)->getType())->getElementCount();
4030         auto *O0 = B.CreateZExtOrTrunc(
4031             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4032         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
4033         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
4034       } else if (auto *EE = dyn_cast<ExtractElementInst>(I)) {
4035         auto Elements =
4036             cast<VectorType>(EE->getOperand(0)->getType())->getElementCount();
4037         auto *O0 = B.CreateZExtOrTrunc(
4038             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
4039         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
4040       } else {
4041         // If we don't know what to do, be conservative and don't do anything.
4042         continue;
4043       }
4044 
4045       // Lastly, extend the result.
4046       NewI->takeName(cast<Instruction>(I));
4047       Value *Res = B.CreateZExtOrTrunc(NewI, OriginalTy);
4048       I->replaceAllUsesWith(Res);
4049       cast<Instruction>(I)->eraseFromParent();
4050       Erased.insert(I);
4051       State.reset(Def, Res, Part);
4052     }
4053   }
4054 
4055   // We'll have created a bunch of ZExts that are now parentless. Clean up.
4056   for (const auto &KV : Cost->getMinimalBitwidths()) {
4057     // If the value wasn't vectorized, we must maintain the original scalar
4058     // type. The absence of the value from State indicates that it
4059     // wasn't vectorized.
4060     // FIXME: Should not rely on getVPValue at this point.
4061     VPValue *Def = State.Plan->getVPValue(KV.first, true);
4062     if (!State.hasAnyVectorValue(Def))
4063       continue;
4064     for (unsigned Part = 0; Part < UF; ++Part) {
4065       Value *I = State.get(Def, Part);
4066       ZExtInst *Inst = dyn_cast<ZExtInst>(I);
4067       if (Inst && Inst->use_empty()) {
4068         Value *NewI = Inst->getOperand(0);
4069         Inst->eraseFromParent();
4070         State.reset(Def, NewI, Part);
4071       }
4072     }
4073   }
4074 }
4075 
4076 void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State) {
4077   // Insert truncates and extends for any truncated instructions as hints to
4078   // InstCombine.
4079   if (VF.isVector())
4080     truncateToMinimalBitwidths(State);
4081 
4082   // Fix widened non-induction PHIs by setting up the PHI operands.
4083   if (OrigPHIsToFix.size()) {
4084     assert(EnableVPlanNativePath &&
4085            "Unexpected non-induction PHIs for fixup in non VPlan-native path");
4086     fixNonInductionPHIs(State);
4087   }
4088 
4089   // At this point every instruction in the original loop is widened to a
4090   // vector form. Now we need to fix the recurrences in the loop. These PHI
4091   // nodes are currently empty because we did not want to introduce cycles.
4092   // This is the second stage of vectorizing recurrences.
4093   fixCrossIterationPHIs(State);
4094 
4095   // Forget the original basic block.
4096   PSE.getSE()->forgetLoop(OrigLoop);
4097 
4098   // If we inserted an edge from the middle block to the unique exit block,
4099   // update uses outside the loop (phis) to account for the newly inserted
4100   // edge.
4101   if (!Cost->requiresScalarEpilogue(VF)) {
4102     // Fix-up external users of the induction variables.
4103     for (auto &Entry : Legal->getInductionVars())
4104       fixupIVUsers(Entry.first, Entry.second,
4105                    getOrCreateVectorTripCount(LI->getLoopFor(LoopVectorBody)),
4106                    IVEndValues[Entry.first], LoopMiddleBlock);
4107 
4108     fixLCSSAPHIs(State);
4109   }
4110 
4111   for (Instruction *PI : PredicatedInstructions)
4112     sinkScalarOperands(&*PI);
4113 
4114   // Remove redundant induction instructions.
4115   cse(LoopVectorBody);
4116 
4117   // Set/update profile weights for the vector and remainder loops as original
4118   // loop iterations are now distributed among them. Note that the original loop,
4119   // represented by LoopScalarBody, becomes the remainder loop after vectorization.
4120   //
4121   // For cases like foldTailByMasking() and requiresScalarEpilogue() we may
4122   // end up with a slightly less accurate result, but that should be OK since
4123   // the profile is not inherently precise anyway. Note also that a possible
4124   // bypass of the vector code caused by legality checks is ignored, optimistically
4125   // assigning all the weight to the vector loop.
4126   //
4127   // For scalable vectorization we can't know at compile time how many iterations
4128   // of the loop are handled in one vector iteration, so instead assume a pessimistic
4129   // vscale of '1'.
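       // For example (assuming VF = 4 and UF = 2): the call below treats each
       // vector iteration as standing in for VF * UF = 8 original iterations when
       // it redistributes the branch weights between the vector and remainder
       // loops.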
4130   setProfileInfoAfterUnrolling(
4131       LI->getLoopFor(LoopScalarBody), LI->getLoopFor(LoopVectorBody),
4132       LI->getLoopFor(LoopScalarBody), VF.getKnownMinValue() * UF);
4133 }
4134 
4135 void InnerLoopVectorizer::fixCrossIterationPHIs(VPTransformState &State) {
4136   // In order to support recurrences we need to be able to vectorize Phi nodes.
4137   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4138   // stage #2: We now need to fix the recurrences by adding incoming edges to
4139   // the currently empty PHI nodes. At this point every instruction in the
4140   // original loop is widened to a vector form so we can use them to construct
4141   // the incoming edges.
4142   VPBasicBlock *Header = State.Plan->getEntry()->getEntryBasicBlock();
4143   for (VPRecipeBase &R : Header->phis()) {
4144     if (auto *ReductionPhi = dyn_cast<VPReductionPHIRecipe>(&R))
4145       fixReduction(ReductionPhi, State);
4146     else if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
4147       fixFirstOrderRecurrence(FOR, State);
4148   }
4149 }
4150 
4151 void InnerLoopVectorizer::fixFirstOrderRecurrence(VPWidenPHIRecipe *PhiR,
4152                                                   VPTransformState &State) {
4153   // This is the second phase of vectorizing first-order recurrences. An
4154   // overview of the transformation is described below. Suppose we have the
4155   // following loop.
4156   //
4157   //   for (int i = 0; i < n; ++i)
4158   //     b[i] = a[i] - a[i - 1];
4159   //
4160   // There is a first-order recurrence on "a". For this loop, the shorthand
4161   // scalar IR looks like:
4162   //
4163   //   scalar.ph:
4164   //     s_init = a[-1]
4165   //     br scalar.body
4166   //
4167   //   scalar.body:
4168   //     i = phi [0, scalar.ph], [i+1, scalar.body]
4169   //     s1 = phi [s_init, scalar.ph], [s2, scalar.body]
4170   //     s2 = a[i]
4171   //     b[i] = s2 - s1
4172   //     br cond, scalar.body, ...
4173   //
4174   // In this example, s1 is a recurrence because its value depends on the
4175   // previous iteration. In the first phase of vectorization, we created a
4176   // vector phi v1 for s1. We now complete the vectorization and produce the
4177   // shorthand vector IR shown below (for VF = 4, UF = 1).
4178   //
4179   //   vector.ph:
4180   //     v_init = vector(..., ..., ..., a[-1])
4181   //     br vector.body
4182   //
4183   //   vector.body
4184   //     i = phi [0, vector.ph], [i+4, vector.body]
4185   //     v1 = phi [v_init, vector.ph], [v2, vector.body]
4186   //     v2 = a[i, i+1, i+2, i+3];
4187   //     v3 = vector(v1(3), v2(0, 1, 2))
4188   //     b[i, i+1, i+2, i+3] = v2 - v3
4189   //     br cond, vector.body, middle.block
4190   //
4191   //   middle.block:
4192   //     x = v2(3)
4193   //     br scalar.ph
4194   //
4195   //   scalar.ph:
4196   //     s_init = phi [x, middle.block], [a[-1], otherwise]
4197   //     br scalar.body
4198   //
4199   // After the vector loop completes execution, we extract the next value of
4200   // the recurrence (x) to use as the initial value in the scalar loop.
4201 
4202   // Extract the last vector element in the middle block. This will be the
4203   // initial value for the recurrence when jumping to the scalar loop.
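       // For illustration (assuming VF = 4): the extracted lane is 3 for a fixed
       // <4 x i32> recurrence; for a scalable vector the index is computed below
       // at runtime as (vscale * 4) - 1.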
4204   VPValue *PreviousDef = PhiR->getBackedgeValue();
4205   Value *Incoming = State.get(PreviousDef, UF - 1);
4206   auto *ExtractForScalar = Incoming;
4207   auto *IdxTy = Builder.getInt32Ty();
4208   if (VF.isVector()) {
4209     auto *One = ConstantInt::get(IdxTy, 1);
4210     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4211     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4212     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
4213     ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
4214                                                     "vector.recur.extract");
4215   }
4216   // Extract the second last element in the middle block if the
4217   // Phi is used outside the loop. We need to extract the phi itself
4218   // and not the last element (the phi update in the current iteration). This
4219   // will be the value when jumping to the exit block from the LoopMiddleBlock,
4220   // when the scalar loop is not run at all.
4221   Value *ExtractForPhiUsedOutsideLoop = nullptr;
4222   if (VF.isVector()) {
4223     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
4224     auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
4225     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
4226         Incoming, Idx, "vector.recur.extract.for.phi");
4227   } else if (UF > 1)
4228     // When the loop is unrolled without vectorizing, initialize
4229     // ExtractForPhiUsedOutsideLoop with the value just prior to the unrolled value
4230     // of `Incoming`. This is analogous to the vectorized case above: extracting
4231     // the second last element when VF > 1.
4232     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
4233 
4234   // Fix the initial value of the original recurrence in the scalar loop.
4235   Builder.SetInsertPoint(&*LoopScalarPreHeader->begin());
4236   PHINode *Phi = cast<PHINode>(PhiR->getUnderlyingValue());
4237   auto *Start = Builder.CreatePHI(Phi->getType(), 2, "scalar.recur.init");
4238   auto *ScalarInit = PhiR->getStartValue()->getLiveInIRValue();
4239   for (auto *BB : predecessors(LoopScalarPreHeader)) {
4240     auto *Incoming = BB == LoopMiddleBlock ? ExtractForScalar : ScalarInit;
4241     Start->addIncoming(Incoming, BB);
4242   }
4243 
4244   Phi->setIncomingValueForBlock(LoopScalarPreHeader, Start);
4245   Phi->setName("scalar.recur");
4246 
4247   // Finally, fix users of the recurrence outside the loop. The users will need
4248   // either the last value of the scalar recurrence or the last value of the
4249   // vector recurrence we extracted in the middle block. Since the loop is in
4250   // LCSSA form, we just need to find all the phi nodes for the original scalar
4251   // recurrence in the exit block, and then add an edge for the middle block.
4252   // Note that LCSSA does not imply single entry when the original scalar loop
4253   // had multiple exiting edges (as we always run the last iteration in the
4254   // scalar epilogue); in that case, there is no edge from middle to exit and
4255   // thus no phis which need to be updated.
4256   if (!Cost->requiresScalarEpilogue(VF))
4257     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4258       if (llvm::is_contained(LCSSAPhi.incoming_values(), Phi))
4259         LCSSAPhi.addIncoming(ExtractForPhiUsedOutsideLoop, LoopMiddleBlock);
4260 }
4261 
4262 void InnerLoopVectorizer::fixReduction(VPReductionPHIRecipe *PhiR,
4263                                        VPTransformState &State) {
4264   PHINode *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
4265   // Get its reduction variable descriptor.
4266   assert(Legal->isReductionVariable(OrigPhi) &&
4267          "Unable to find the reduction variable");
4268   const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
4269 
4270   RecurKind RK = RdxDesc.getRecurrenceKind();
4271   TrackingVH<Value> ReductionStartValue = RdxDesc.getRecurrenceStartValue();
4272   Instruction *LoopExitInst = RdxDesc.getLoopExitInstr();
4273   setDebugLocFromInst(ReductionStartValue);
4274 
4275   VPValue *LoopExitInstDef = PhiR->getBackedgeValue();
4276   // This is the vector-clone of the value that leaves the loop.
4277   Type *VecTy = State.get(LoopExitInstDef, 0)->getType();
4278 
4279   // Wrap flags are in general invalid after vectorization, clear them.
4280   clearReductionWrapFlags(RdxDesc, State);
4281 
4282   // Before each round, move the insertion point right between
4283   // the PHIs and the values we are going to write.
4284   // This allows us to write both PHINodes and the extractelement
4285   // instructions.
4286   Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4287 
4288   setDebugLocFromInst(LoopExitInst);
4289 
4290   Type *PhiTy = OrigPhi->getType();
4291   // If tail is folded by masking, the vector value to leave the loop should be
4292   // a Select choosing between the vectorized LoopExitInst and vectorized Phi,
4293   // instead of the former. For an inloop reduction the reduction will already
4294   // be predicated, and does not need to be handled here.
4295   if (Cost->foldTailByMasking() && !PhiR->isInLoop()) {
4296     for (unsigned Part = 0; Part < UF; ++Part) {
4297       Value *VecLoopExitInst = State.get(LoopExitInstDef, Part);
4298       Value *Sel = nullptr;
4299       for (User *U : VecLoopExitInst->users()) {
4300         if (isa<SelectInst>(U)) {
4301           assert(!Sel && "Reduction exit feeding two selects");
4302           Sel = U;
4303         } else
4304           assert(isa<PHINode>(U) && "Reduction exit must feed Phi's or select");
4305       }
4306       assert(Sel && "Reduction exit feeds no select");
4307       State.reset(LoopExitInstDef, Sel, Part);
4308 
4309       // If the target can create a predicated operator for the reduction at no
4310       // extra cost in the loop (for example a predicated vadd), it can be
4311       // cheaper for the select to remain in the loop than be sunk out of it,
4312       // and so use the select value for the phi instead of the old
4313       // LoopExitValue.
4314       if (PreferPredicatedReductionSelect ||
4315           TTI->preferPredicatedReductionSelect(
4316               RdxDesc.getOpcode(), PhiTy,
4317               TargetTransformInfo::ReductionFlags())) {
4318         auto *VecRdxPhi =
4319             cast<PHINode>(State.get(PhiR, Part));
4320         VecRdxPhi->setIncomingValueForBlock(
4321             LI->getLoopFor(LoopVectorBody)->getLoopLatch(), Sel);
4322       }
4323     }
4324   }
4325 
4326   // If the vector reduction can be performed in a smaller type, we truncate
4327   // then extend the loop exit value to enable InstCombine to evaluate the
4328   // entire expression in the smaller type.
4329   if (VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
4330     assert(!PhiR->isInLoop() && "Unexpected truncated inloop reduction!");
4331     Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), VF);
4332     Builder.SetInsertPoint(
4333         LI->getLoopFor(LoopVectorBody)->getLoopLatch()->getTerminator());
4334     VectorParts RdxParts(UF);
4335     for (unsigned Part = 0; Part < UF; ++Part) {
4336       RdxParts[Part] = State.get(LoopExitInstDef, Part);
4337       Value *Trunc = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4338       Value *Extnd = RdxDesc.isSigned() ? Builder.CreateSExt(Trunc, VecTy)
4339                                         : Builder.CreateZExt(Trunc, VecTy);
4340       for (User *U : llvm::make_early_inc_range(RdxParts[Part]->users()))
4341         if (U != Trunc) {
4342           U->replaceUsesOfWith(RdxParts[Part], Extnd);
4343           RdxParts[Part] = Extnd;
4344         }
4345     }
4346     Builder.SetInsertPoint(&*LoopMiddleBlock->getFirstInsertionPt());
4347     for (unsigned Part = 0; Part < UF; ++Part) {
4348       RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
4349       State.reset(LoopExitInstDef, RdxParts[Part], Part);
4350     }
4351   }
4352 
4353   // Reduce all of the unrolled parts into a single vector.
4354   Value *ReducedPartRdx = State.get(LoopExitInstDef, 0);
4355   unsigned Op = RecurrenceDescriptor::getOpcode(RK);
4356 
4357   // The middle block terminator has already been assigned a DebugLoc here (the
4358   // OrigLoop's single latch terminator). We want the whole middle block to
4359   // appear to execute on this line because: (a) it is all compiler generated,
4360   // (b) these instructions are always executed after evaluating the latch
4361   // conditional branch, and (c) other passes may add new predecessors which
4362   // terminate on this line. This is the easiest way to ensure we don't
4363   // accidentally cause an extra step back into the loop while debugging.
4364   setDebugLocFromInst(LoopMiddleBlock->getTerminator());
4365   if (PhiR->isOrdered())
4366     ReducedPartRdx = State.get(LoopExitInstDef, UF - 1);
4367   else {
4368     // Floating-point operations should have some FMF to enable the reduction.
4369     IRBuilderBase::FastMathFlagGuard FMFG(Builder);
4370     Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
4371     for (unsigned Part = 1; Part < UF; ++Part) {
4372       Value *RdxPart = State.get(LoopExitInstDef, Part);
4373       if (Op != Instruction::ICmp && Op != Instruction::FCmp) {
4374         ReducedPartRdx = Builder.CreateBinOp(
4375             (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
4376       } else if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK))
4377         ReducedPartRdx = createSelectCmpOp(Builder, ReductionStartValue, RK,
4378                                            ReducedPartRdx, RdxPart);
4379       else
4380         ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
4381     }
4382   }
4383 
4384   // Create the reduction after the loop. Note that inloop reductions create the
4385   // target reduction in the loop using a Reduction recipe.
4386   if (VF.isVector() && !PhiR->isInLoop()) {
4387     ReducedPartRdx =
4388         createTargetReduction(Builder, TTI, RdxDesc, ReducedPartRdx, OrigPhi);
4389     // If the reduction can be performed in a smaller type, we need to extend
4390     // the reduction to the wider type before we branch to the original loop.
4391     if (PhiTy != RdxDesc.getRecurrenceType())
4392       ReducedPartRdx = RdxDesc.isSigned()
4393                            ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
4394                            : Builder.CreateZExt(ReducedPartRdx, PhiTy);
4395   }
4396 
4397   // Create a phi node that merges control-flow from the backedge-taken check
4398   // block and the middle block.
4399   PHINode *BCBlockPhi = PHINode::Create(PhiTy, 2, "bc.merge.rdx",
4400                                         LoopScalarPreHeader->getTerminator());
4401   for (unsigned I = 0, E = LoopBypassBlocks.size(); I != E; ++I)
4402     BCBlockPhi->addIncoming(ReductionStartValue, LoopBypassBlocks[I]);
4403   BCBlockPhi->addIncoming(ReducedPartRdx, LoopMiddleBlock);
4404 
4405   // Now, we need to fix the users of the reduction variable
4406   // inside and outside of the scalar remainder loop.
4407 
4408   // We know that the loop is in LCSSA form. We need to update the PHI nodes
4409   // in the exit blocks.  See comment on analogous loop in
4410   // fixFirstOrderRecurrence for a more complete explanation of the logic.
4411   if (!Cost->requiresScalarEpilogue(VF))
4412     for (PHINode &LCSSAPhi : LoopExitBlock->phis())
4413       if (llvm::is_contained(LCSSAPhi.incoming_values(), LoopExitInst))
4414         LCSSAPhi.addIncoming(ReducedPartRdx, LoopMiddleBlock);
4415 
4416   // Fix the scalar loop reduction variable with the incoming reduction sum
4417   // from the vector body and from the backedge value.
4418   int IncomingEdgeBlockIdx =
4419       OrigPhi->getBasicBlockIndex(OrigLoop->getLoopLatch());
4420   assert(IncomingEdgeBlockIdx >= 0 && "Invalid block index");
4421   // Pick the other block.
4422   int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1);
4423   OrigPhi->setIncomingValue(SelfEdgeBlockIdx, BCBlockPhi);
4424   OrigPhi->setIncomingValue(IncomingEdgeBlockIdx, LoopExitInst);
4425 }
4426 
4427 void InnerLoopVectorizer::clearReductionWrapFlags(const RecurrenceDescriptor &RdxDesc,
4428                                                   VPTransformState &State) {
4429   RecurKind RK = RdxDesc.getRecurrenceKind();
4430   if (RK != RecurKind::Add && RK != RecurKind::Mul)
4431     return;
4432 
4433   Instruction *LoopExitInstr = RdxDesc.getLoopExitInstr();
4434   assert(LoopExitInstr && "null loop exit instruction");
4435   SmallVector<Instruction *, 8> Worklist;
4436   SmallPtrSet<Instruction *, 8> Visited;
4437   Worklist.push_back(LoopExitInstr);
4438   Visited.insert(LoopExitInstr);
4439 
4440   while (!Worklist.empty()) {
4441     Instruction *Cur = Worklist.pop_back_val();
4442     if (isa<OverflowingBinaryOperator>(Cur))
4443       for (unsigned Part = 0; Part < UF; ++Part) {
4444         // FIXME: Should not rely on getVPValue at this point.
4445         Value *V = State.get(State.Plan->getVPValue(Cur, true), Part);
4446         cast<Instruction>(V)->dropPoisonGeneratingFlags();
4447       }
4448 
4449     for (User *U : Cur->users()) {
4450       Instruction *UI = cast<Instruction>(U);
4451       if ((Cur != LoopExitInstr || OrigLoop->contains(UI->getParent())) &&
4452           Visited.insert(UI).second)
4453         Worklist.push_back(UI);
4454     }
4455   }
4456 }
4457 
4458 void InnerLoopVectorizer::fixLCSSAPHIs(VPTransformState &State) {
4459   for (PHINode &LCSSAPhi : LoopExitBlock->phis()) {
4460     if (LCSSAPhi.getBasicBlockIndex(LoopMiddleBlock) != -1)
4461       // Some phis were already hand updated by the reduction and recurrence
4462       // code above, leave them alone.
4463       continue;
4464 
4465     auto *IncomingValue = LCSSAPhi.getIncomingValue(0);
4466     // Non-instruction incoming values will have only one value.
4467 
4468     VPLane Lane = VPLane::getFirstLane();
4469     if (isa<Instruction>(IncomingValue) &&
4470         !Cost->isUniformAfterVectorization(cast<Instruction>(IncomingValue),
4471                                            VF))
4472       Lane = VPLane::getLastLaneForVF(VF);
4473 
4474     // Can be a loop invariant incoming value or the last scalar value to be
4475     // extracted from the vectorized loop.
4476     // FIXME: Should not rely on getVPValue at this point.
4477     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
4478     Value *lastIncomingValue =
4479         OrigLoop->isLoopInvariant(IncomingValue)
4480             ? IncomingValue
4481             : State.get(State.Plan->getVPValue(IncomingValue, true),
4482                         VPIteration(UF - 1, Lane));
4483     LCSSAPhi.addIncoming(lastIncomingValue, LoopMiddleBlock);
4484   }
4485 }
4486 
4487 void InnerLoopVectorizer::sinkScalarOperands(Instruction *PredInst) {
4488   // The basic block and loop containing the predicated instruction.
4489   auto *PredBB = PredInst->getParent();
4490   auto *VectorLoop = LI->getLoopFor(PredBB);
4491 
4492   // Initialize a worklist with the operands of the predicated instruction.
4493   SetVector<Value *> Worklist(PredInst->op_begin(), PredInst->op_end());
4494 
4495   // Holds instructions that we need to analyze again. An instruction may be
4496   // reanalyzed if we don't yet know if we can sink it or not.
4497   SmallVector<Instruction *, 8> InstsToReanalyze;
4498 
4499   // Returns true if a given use occurs in the predicated block. Phi nodes use
4500   // their operands in their corresponding predecessor blocks.
4501   auto isBlockOfUsePredicated = [&](Use &U) -> bool {
4502     auto *I = cast<Instruction>(U.getUser());
4503     BasicBlock *BB = I->getParent();
4504     if (auto *Phi = dyn_cast<PHINode>(I))
4505       BB = Phi->getIncomingBlock(
4506           PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
4507     return BB == PredBB;
4508   };
4509 
4510   // Iteratively sink the scalarized operands of the predicated instruction
4511   // into the block we created for it. When an instruction is sunk, its
4512   // operands are then added to the worklist. The algorithm ends once a pass
4513   // through the worklist fails to sink a single instruction.
4514   bool Changed;
4515   do {
4516     // Add the instructions that need to be reanalyzed to the worklist, and
4517     // reset the changed indicator.
4518     Worklist.insert(InstsToReanalyze.begin(), InstsToReanalyze.end());
4519     InstsToReanalyze.clear();
4520     Changed = false;
4521 
4522     while (!Worklist.empty()) {
4523       auto *I = dyn_cast<Instruction>(Worklist.pop_back_val());
4524 
4525       // We can't sink an instruction if it is a phi node, is not in the loop,
4526       // or may have side effects.
4527       if (!I || isa<PHINode>(I) || !VectorLoop->contains(I) ||
4528           I->mayHaveSideEffects())
4529         continue;
4530 
4531       // If the instruction is already in PredBB, check if we can sink its
4532       // operands. In that case, VPlan's sinkScalarOperands() succeeded in
4533       // sinking the scalar instruction I, hence it appears in PredBB; but it
4534       // may have failed to sink I's operands (recursively), which we try
4535       // (again) here.
4536       if (I->getParent() == PredBB) {
4537         Worklist.insert(I->op_begin(), I->op_end());
4538         continue;
4539       }
4540 
4541       // It's legal to sink the instruction if all its uses occur in the
4542       // predicated block. Otherwise, there's nothing to do yet, and we may
4543       // need to reanalyze the instruction.
4544       if (!llvm::all_of(I->uses(), isBlockOfUsePredicated)) {
4545         InstsToReanalyze.push_back(I);
4546         continue;
4547       }
4548 
4549       // Move the instruction to the beginning of the predicated block, and add
4550       // its operands to the worklist.
4551       I->moveBefore(&*PredBB->getFirstInsertionPt());
4552       Worklist.insert(I->op_begin(), I->op_end());
4553 
4554       // The sinking may have enabled other instructions to be sunk, so we will
4555       // need to iterate.
4556       Changed = true;
4557     }
4558   } while (Changed);
4559 }
4560 
4561 void InnerLoopVectorizer::fixNonInductionPHIs(VPTransformState &State) {
4562   for (PHINode *OrigPhi : OrigPHIsToFix) {
4563     VPWidenPHIRecipe *VPPhi =
4564         cast<VPWidenPHIRecipe>(State.Plan->getVPValue(OrigPhi));
4565     PHINode *NewPhi = cast<PHINode>(State.get(VPPhi, 0));
4566     // Make sure the builder has a valid insert point.
4567     Builder.SetInsertPoint(NewPhi);
4568     for (unsigned i = 0; i < VPPhi->getNumOperands(); ++i) {
4569       VPValue *Inc = VPPhi->getIncomingValue(i);
4570       VPBasicBlock *VPBB = VPPhi->getIncomingBlock(i);
4571       NewPhi->addIncoming(State.get(Inc, 0), State.CFG.VPBB2IRBB[VPBB]);
4572     }
4573   }
4574 }
4575 
4576 bool InnerLoopVectorizer::useOrderedReductions(RecurrenceDescriptor &RdxDesc) {
4577   return Cost->useOrderedReductions(RdxDesc);
4578 }
4579 
4580 void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN,
4581                                               VPWidenPHIRecipe *PhiR,
4582                                               VPTransformState &State) {
4583   PHINode *P = cast<PHINode>(PN);
4584   if (EnableVPlanNativePath) {
4585     // Currently we enter here in the VPlan-native path for non-induction
4586     // PHIs where all control flow is uniform. We simply widen these PHIs.
4587     // Create a vector phi with no operands - the vector phi operands will be
4588     // set at the end of vector code generation.
4589     Type *VecTy = (State.VF.isScalar())
4590                       ? PN->getType()
4591                       : VectorType::get(PN->getType(), State.VF);
4592     Value *VecPhi = Builder.CreatePHI(VecTy, PN->getNumOperands(), "vec.phi");
4593     State.set(PhiR, VecPhi, 0);
4594     OrigPHIsToFix.push_back(P);
4595 
4596     return;
4597   }
4598 
4599   assert(PN->getParent() == OrigLoop->getHeader() &&
4600          "Non-header phis should have been handled elsewhere");
4601 
4602   // In order to support recurrences we need to be able to vectorize Phi nodes.
4603   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
4604   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
4605   // this value when we vectorize all of the instructions that use the PHI.
4606 
4607   assert(!Legal->isReductionVariable(P) &&
4608          "reductions should be handled elsewhere");
4609 
4610   setDebugLocFromInst(P);
4611 
4612   // This PHINode must be an induction variable.
4613   // Make sure that we know about it.
4614   assert(Legal->getInductionVars().count(P) && "Not an induction variable");
4615 
4616   InductionDescriptor II = Legal->getInductionVars().lookup(P);
4617   const DataLayout &DL = OrigLoop->getHeader()->getModule()->getDataLayout();
4618 
4619   // FIXME: The newly created binary instructions should contain nsw/nuw flags,
4620   // which can be found from the original scalar operations.
4621   switch (II.getKind()) {
4622   case InductionDescriptor::IK_NoInduction:
4623     llvm_unreachable("Unknown induction");
4624   case InductionDescriptor::IK_IntInduction:
4625   case InductionDescriptor::IK_FpInduction:
4626     llvm_unreachable("Integer/fp induction is handled elsewhere.");
4627   case InductionDescriptor::IK_PtrInduction: {
4628     // Handle the pointer induction variable case.
4629     assert(P->getType()->isPointerTy() && "Unexpected type.");
4630 
4631     if (Cost->isScalarAfterVectorization(P, State.VF)) {
4632       // This is the normalized GEP that starts counting at zero.
4633       Value *PtrInd =
4634           Builder.CreateSExtOrTrunc(Induction, II.getStep()->getType());
4635       // Determine the number of scalars we need to generate for each unroll
4636       // iteration. If the instruction is uniform, we only need to generate the
4637       // first lane. Otherwise, we generate all VF values.
4638       bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF);
4639       assert((IsUniform || !State.VF.isScalable()) &&
4640              "Cannot scalarize a scalable VF");
4641       unsigned Lanes = IsUniform ? 1 : State.VF.getFixedValue();
4642 
4643       for (unsigned Part = 0; Part < UF; ++Part) {
4644         Value *PartStart =
4645             createStepForVF(Builder, PtrInd->getType(), VF, Part);
4646 
4647         for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
4648           Value *Idx = Builder.CreateAdd(
4649               PartStart, ConstantInt::get(PtrInd->getType(), Lane));
4650           Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
4651           Value *SclrGep =
4652               emitTransformedIndex(Builder, GlobalIdx, PSE.getSE(), DL, II);
4653           SclrGep->setName("next.gep");
4654           State.set(PhiR, SclrGep, VPIteration(Part, Lane));
4655         }
4656       }
4657       return;
4658     }
4659     assert(isa<SCEVConstant>(II.getStep()) &&
4660            "Induction step not a SCEV constant!");
4661     Type *PhiType = II.getStep()->getType();
4662 
4663     // Build a pointer phi
4664     Value *ScalarStartValue = II.getStartValue();
4665     Type *ScStValueType = ScalarStartValue->getType();
4666     PHINode *NewPointerPhi =
4667         PHINode::Create(ScStValueType, 2, "pointer.phi", Induction);
4668     NewPointerPhi->addIncoming(ScalarStartValue, LoopVectorPreHeader);
4669 
4670     // A pointer induction, performed by using a gep
4671     BasicBlock *LoopLatch = LI->getLoopFor(LoopVectorBody)->getLoopLatch();
4672     Instruction *InductionLoc = LoopLatch->getTerminator();
4673     const SCEV *ScalarStep = II.getStep();
4674     SCEVExpander Exp(*PSE.getSE(), DL, "induction");
4675     Value *ScalarStepValue =
4676         Exp.expandCodeFor(ScalarStep, PhiType, InductionLoc);
4677     Value *RuntimeVF = getRuntimeVF(Builder, PhiType, VF);
4678     Value *NumUnrolledElems =
4679         Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, State.UF));
4680     Value *InductionGEP = GetElementPtrInst::Create(
4681         II.getElementType(), NewPointerPhi,
4682         Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
4683         InductionLoc);
4684     NewPointerPhi->addIncoming(InductionGEP, LoopLatch);
4685 
4686     // Create UF many actual address geps that use the pointer
4687     // phi as base and a vectorized version of the step value
4688     // (<step*0, ..., step*N>) as offset.
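         // For example (assuming VF = 4, UF = 2 and a unit step): part 0 uses
         // offsets <0, 1, 2, 3> and part 1 uses offsets <4, 5, 6, 7>, both
         // applied to the same pointer phi.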
4689     for (unsigned Part = 0; Part < State.UF; ++Part) {
4690       Type *VecPhiType = VectorType::get(PhiType, State.VF);
4691       Value *StartOffsetScalar =
4692           Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, Part));
4693       Value *StartOffset =
4694           Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
4695       // Create a vector of consecutive numbers from zero to VF.
4696       StartOffset =
4697           Builder.CreateAdd(StartOffset, Builder.CreateStepVector(VecPhiType));
4698 
4699       Value *GEP = Builder.CreateGEP(
4700           II.getElementType(), NewPointerPhi,
4701           Builder.CreateMul(
4702               StartOffset, Builder.CreateVectorSplat(State.VF, ScalarStepValue),
4703               "vector.gep"));
4704       State.set(PhiR, GEP, Part);
4705     }
4706   }
4707   }
4708 }
4709 
4710 /// A helper function for checking whether an integer division-related
4711 /// instruction may divide by zero (in which case it must be predicated if
4712 /// executed conditionally in the scalar code).
4713 /// TODO: It may be worthwhile to generalize and check isKnownNonZero().
4714 /// Non-zero divisors that are not compile-time constants will not be
4715 /// converted into multiplication, so we will still end up scalarizing
4716 /// the division, but can do so without predication.
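     /// For example (illustrative), in 'if (c) q = a / b;' with a loop-varying
     /// 'b', the divisor may be zero on iterations where 'c' is false, so the
     /// scalarized division must remain predicated on 'c'.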
4717 static bool mayDivideByZero(Instruction &I) {
4718   assert((I.getOpcode() == Instruction::UDiv ||
4719           I.getOpcode() == Instruction::SDiv ||
4720           I.getOpcode() == Instruction::URem ||
4721           I.getOpcode() == Instruction::SRem) &&
4722          "Unexpected instruction");
4723   Value *Divisor = I.getOperand(1);
4724   auto *CInt = dyn_cast<ConstantInt>(Divisor);
4725   return !CInt || CInt->isZero();
4726 }
4727 
4728 void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def,
4729                                                VPUser &ArgOperands,
4730                                                VPTransformState &State) {
4731   assert(!isa<DbgInfoIntrinsic>(I) &&
4732          "DbgInfoIntrinsic should have been dropped during VPlan construction");
4733   setDebugLocFromInst(&I);
4734 
4735   Module *M = I.getParent()->getParent()->getParent();
4736   auto *CI = cast<CallInst>(&I);
4737 
4738   SmallVector<Type *, 4> Tys;
4739   for (Value *ArgOperand : CI->args())
4740     Tys.push_back(ToVectorTy(ArgOperand->getType(), VF.getKnownMinValue()));
4741 
4742   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
4743 
4744   // The flag indicates whether we use an intrinsic or an ordinary call for
4745   // the vectorized version of the instruction, i.e. whether an intrinsic
4746   // call is more beneficial than a library call.
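       // For example (illustrative), with VF=4 a call to @llvm.sqrt.f32 may be
       // widened to @llvm.sqrt.v4f32, whereas a plain call to 'expf' may
       // instead be mapped to a 4-lane vector math-library routine found
       // through the VFDatabase.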
4747   bool NeedToScalarize = false;
4748   InstructionCost CallCost = Cost->getVectorCallCost(CI, VF, NeedToScalarize);
4749   InstructionCost IntrinsicCost = ID ? Cost->getVectorIntrinsicCost(CI, VF) : 0;
4750   bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
4751   assert((UseVectorIntrinsic || !NeedToScalarize) &&
4752          "Instruction should be scalarized elsewhere.");
4753   assert((IntrinsicCost.isValid() || CallCost.isValid()) &&
4754          "Either the intrinsic cost or vector call cost must be valid");
4755 
4756   for (unsigned Part = 0; Part < UF; ++Part) {
4757     SmallVector<Type *, 2> TysForDecl = {CI->getType()};
4758     SmallVector<Value *, 4> Args;
4759     for (auto &I : enumerate(ArgOperands.operands())) {
4760       // Some intrinsics have a scalar argument - don't replace it with a
4761       // vector.
4762       Value *Arg;
4763       if (!UseVectorIntrinsic || !hasVectorInstrinsicScalarOpd(ID, I.index()))
4764         Arg = State.get(I.value(), Part);
4765       else {
4766         Arg = State.get(I.value(), VPIteration(0, 0));
4767         if (hasVectorInstrinsicOverloadedScalarOpd(ID, I.index()))
4768           TysForDecl.push_back(Arg->getType());
4769       }
4770       Args.push_back(Arg);
4771     }
4772 
4773     Function *VectorF;
4774     if (UseVectorIntrinsic) {
4775       // Use vector version of the intrinsic.
4776       if (VF.isVector())
4777         TysForDecl[0] = VectorType::get(CI->getType()->getScalarType(), VF);
4778       VectorF = Intrinsic::getDeclaration(M, ID, TysForDecl);
4779       assert(VectorF && "Can't retrieve vector intrinsic.");
4780     } else {
4781       // Use vector version of the function call.
4782       const VFShape Shape = VFShape::get(*CI, VF, false /*HasGlobalPred*/);
4783 #ifndef NDEBUG
4784       assert(VFDatabase(*CI).getVectorizedFunction(Shape) != nullptr &&
4785              "Can't create vector function.");
4786 #endif
4787       VectorF = VFDatabase(*CI).getVectorizedFunction(Shape);
4788     }
4789     SmallVector<OperandBundleDef, 1> OpBundles;
4790     CI->getOperandBundlesAsDefs(OpBundles);
4791     CallInst *V = Builder.CreateCall(VectorF, Args, OpBundles);
4792
4793     if (isa<FPMathOperator>(V))
4794       V->copyFastMathFlags(CI);
4795
4796     State.set(Def, V, Part);
4797     addMetadata(V, &I);
4798   }
4799 }
4800 
4801 void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
4802   // We should not collect Scalars more than once per VF. Right now, this
4803   // function is called from collectUniformsAndScalars(), which already does
4804   // this check. Collecting Scalars for VF=1 does not make any sense.
4805   assert(VF.isVector() && Scalars.find(VF) == Scalars.end() &&
4806          "This function should not be visited twice for the same VF");
4807 
4808   SmallSetVector<Instruction *, 8> Worklist;
4809 
4810   // These sets are used to seed the analysis with pointers used by memory
4811   // accesses that will remain scalar.
4812   SmallSetVector<Instruction *, 8> ScalarPtrs;
4813   SmallPtrSet<Instruction *, 8> PossibleNonScalarPtrs;
4814   auto *Latch = TheLoop->getLoopLatch();
4815 
4816   // A helper that returns true if the use of Ptr by MemAccess will be scalar.
4817   // The pointer operands of loads and stores will be scalar as long as the
4818   // memory access is not a gather or scatter operation. The value operand of a
4819   // store will remain scalar if the store is scalarized.
4820   auto isScalarUse = [&](Instruction *MemAccess, Value *Ptr) {
4821     InstWidening WideningDecision = getWideningDecision(MemAccess, VF);
4822     assert(WideningDecision != CM_Unknown &&
4823            "Widening decision should be ready at this moment");
4824     if (auto *Store = dyn_cast<StoreInst>(MemAccess))
4825       if (Ptr == Store->getValueOperand())
4826         return WideningDecision == CM_Scalarize;
4827     assert(Ptr == getLoadStorePointerOperand(MemAccess) &&
4828            "Ptr is neither a value nor a pointer operand");
4829     return WideningDecision != CM_GatherScatter;
4830   };
4831 
4832   // A helper that returns true if the given value is a bitcast or
4833   // getelementptr instruction contained in the loop.
4834   auto isLoopVaryingBitCastOrGEP = [&](Value *V) {
4835     return ((isa<BitCastInst>(V) && V->getType()->isPointerTy()) ||
4836             isa<GetElementPtrInst>(V)) &&
4837            !TheLoop->isLoopInvariant(V);
4838   };
4839 
4840   // A helper that evaluates a memory access's use of a pointer. If the use will
4841   // be a scalar use and the pointer is only used by memory accesses, we place
4842   // the pointer in ScalarPtrs. Otherwise, the pointer is placed in
4843   // PossibleNonScalarPtrs.
4844   auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) {
4845     // We only care about bitcast and getelementptr instructions contained in
4846     // the loop.
4847     if (!isLoopVaryingBitCastOrGEP(Ptr))
4848       return;
4849 
4850     // If the pointer has already been identified as scalar (e.g., if it was
4851     // also identified as uniform), there's nothing to do.
4852     auto *I = cast<Instruction>(Ptr);
4853     if (Worklist.count(I))
4854       return;
4855 
4856     // If the use of the pointer will be a scalar use, and all users of the
4857     // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise,
4858     // place the pointer in PossibleNonScalarPtrs.
4859     if (isScalarUse(MemAccess, Ptr) && llvm::all_of(I->users(), [&](User *U) {
4860           return isa<LoadInst>(U) || isa<StoreInst>(U);
4861         }))
4862       ScalarPtrs.insert(I);
4863     else
4864       PossibleNonScalarPtrs.insert(I);
4865   };
4866 
4867   // We seed the scalars analysis with three classes of instructions: (1)
4868   // instructions marked uniform-after-vectorization, (2) bitcast,
4869   // getelementptr and (pointer) phi instructions used by memory accesses
4870   // requiring a scalar use, and (3) the forced scalars collected below.
4871   //
4872   // (1) Add to the worklist all instructions that have been identified as
4873   // uniform-after-vectorization.
4874   Worklist.insert(Uniforms[VF].begin(), Uniforms[VF].end());
4875 
4876   // (2) Add to the worklist all bitcast and getelementptr instructions used by
4877   // memory accesses requiring a scalar use. The pointer operands of loads and
4878   // stores will be scalar as long as the memory access is not a gather or
4879   // scatter operation. The value operand of a store will remain scalar if the
4880   // store is scalarized.
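       // For example (illustrative), in 'A[i] = B[i] + 1' with consecutive,
       // widened accesses only the lane-0 addresses of A[i] and B[i] are
       // needed, so the GEPs computing them remain scalar; a gather/scatter
       // would instead consume a vector of addresses.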
4881   for (auto *BB : TheLoop->blocks())
4882     for (auto &I : *BB) {
4883       if (auto *Load = dyn_cast<LoadInst>(&I)) {
4884         evaluatePtrUse(Load, Load->getPointerOperand());
4885       } else if (auto *Store = dyn_cast<StoreInst>(&I)) {
4886         evaluatePtrUse(Store, Store->getPointerOperand());
4887         evaluatePtrUse(Store, Store->getValueOperand());
4888       }
4889     }
4890   for (auto *I : ScalarPtrs)
4891     if (!PossibleNonScalarPtrs.count(I)) {
4892       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *I << "\n");
4893       Worklist.insert(I);
4894     }
4895 
4896   // Insert the forced scalars.
4897   // FIXME: Currently widenPHIInstruction() often creates a dead vector
4898   // induction variable when the PHI user is scalarized.
4899   auto ForcedScalar = ForcedScalars.find(VF);
4900   if (ForcedScalar != ForcedScalars.end())
4901     for (auto *I : ForcedScalar->second)
4902       Worklist.insert(I);
4903 
4904   // Expand the worklist by looking through any bitcasts and getelementptr
4905   // instructions we've already identified as scalar. This is similar to the
4906   // expansion step in collectLoopUniforms(); however, here we're only
4907   // expanding to include additional bitcasts and getelementptr instructions.
4908   unsigned Idx = 0;
4909   while (Idx != Worklist.size()) {
4910     Instruction *Dst = Worklist[Idx++];
4911     if (!isLoopVaryingBitCastOrGEP(Dst->getOperand(0)))
4912       continue;
4913     auto *Src = cast<Instruction>(Dst->getOperand(0));
4914     if (llvm::all_of(Src->users(), [&](User *U) -> bool {
4915           auto *J = cast<Instruction>(U);
4916           return !TheLoop->contains(J) || Worklist.count(J) ||
4917                  ((isa<LoadInst>(J) || isa<StoreInst>(J)) &&
4918                   isScalarUse(J, Src));
4919         })) {
4920       Worklist.insert(Src);
4921       LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n");
4922     }
4923   }
4924 
4925   // An induction variable will remain scalar if all users of the induction
4926   // variable and induction variable update remain scalar.
4927   for (auto &Induction : Legal->getInductionVars()) {
4928     auto *Ind = Induction.first;
4929     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
4930 
4931     // If tail-folding is applied, the primary induction variable will be used
4932     // to feed a vector compare.
4933     if (Ind == Legal->getPrimaryInduction() && foldTailByMasking())
4934       continue;
4935 
4936     // Returns true if \p Indvar is a pointer induction that is used directly by
4937     // load/store instruction \p I.
4938     auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar,
4939                                               Instruction *I) {
4940       return Induction.second.getKind() ==
4941                  InductionDescriptor::IK_PtrInduction &&
4942              (isa<LoadInst>(I) || isa<StoreInst>(I)) &&
4943              Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar);
4944     };
4945 
4946     // Determine if all users of the induction variable are scalar after
4947     // vectorization.
4948     auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
4949       auto *I = cast<Instruction>(U);
4950       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
4951              IsDirectLoadStoreFromPtrIndvar(Ind, I);
4952     });
4953     if (!ScalarInd)
4954       continue;
4955 
4956     // Determine if all users of the induction variable update instruction are
4957     // scalar after vectorization.
4958     auto ScalarIndUpdate =
4959         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
4960           auto *I = cast<Instruction>(U);
4961           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
4962                  IsDirectLoadStoreFromPtrIndvar(IndUpdate, I);
4963         });
4964     if (!ScalarIndUpdate)
4965       continue;
4966 
4967     // The induction variable and its update instruction will remain scalar.
4968     Worklist.insert(Ind);
4969     Worklist.insert(IndUpdate);
4970     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *Ind << "\n");
4971     LLVM_DEBUG(dbgs() << "LV: Found scalar instruction: " << *IndUpdate
4972                       << "\n");
4973   }
4974 
4975   Scalars[VF].insert(Worklist.begin(), Worklist.end());
4976 }
4977 
4978 bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I) const {
4979   if (!blockNeedsPredicationForAnyReason(I->getParent()))
4980     return false;
4981   switch(I->getOpcode()) {
4982   default:
4983     break;
4984   case Instruction::Load:
4985   case Instruction::Store: {
4986     if (!Legal->isMaskRequired(I))
4987       return false;
4988     auto *Ptr = getLoadStorePointerOperand(I);
4989     auto *Ty = getLoadStoreType(I);
4990     const Align Alignment = getLoadStoreAlignment(I);
4991     return isa<LoadInst>(I) ? !(isLegalMaskedLoad(Ty, Ptr, Alignment) ||
4992                                 TTI.isLegalMaskedGather(Ty, Alignment))
4993                             : !(isLegalMaskedStore(Ty, Ptr, Alignment) ||
4994                                 TTI.isLegalMaskedScatter(Ty, Alignment));
4995   }
4996   case Instruction::UDiv:
4997   case Instruction::SDiv:
4998   case Instruction::SRem:
4999   case Instruction::URem:
5000     return mayDivideByZero(*I);
5001   }
5002   return false;
5003 }
5004 
5005 bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
5006     Instruction *I, ElementCount VF) {
5007   assert(isAccessInterleaved(I) && "Expecting interleaved access.");
5008   assert(getWideningDecision(I, VF) == CM_Unknown &&
5009          "Decision should not be set yet.");
5010   auto *Group = getInterleavedAccessGroup(I);
5011   assert(Group && "Must have a group.");
5012 
5013   // If the instruction's allocated size doesn't equal its type size, it
5014   // requires padding and will be scalarized.
5015   auto &DL = I->getModule()->getDataLayout();
5016   auto *ScalarTy = getLoadStoreType(I);
5017   if (hasIrregularType(ScalarTy, DL))
5018     return false;
5019 
5020   // Check if masking is required.
5021   // A Group may need masking for one of two reasons: it resides in a block that
5022   // needs predication, or it was decided to use masking to deal with gaps
5023   // (either a gap at the end of a load-access that may result in a speculative
5024   // load, or any gaps in a store-access).
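       // For example (illustrative), a store group with factor 2 that writes
       // A[2*i] but not A[2*i+1] has a gap, so it falls under the
       // store-with-gaps case below and requires masking.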
5025   bool PredicatedAccessRequiresMasking =
5026       blockNeedsPredicationForAnyReason(I->getParent()) &&
5027       Legal->isMaskRequired(I);
5028   bool LoadAccessWithGapsRequiresEpilogMasking =
5029       isa<LoadInst>(I) && Group->requiresScalarEpilogue() &&
5030       !isScalarEpilogueAllowed();
5031   bool StoreAccessWithGapsRequiresMasking =
5032       isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor());
5033   if (!PredicatedAccessRequiresMasking &&
5034       !LoadAccessWithGapsRequiresEpilogMasking &&
5035       !StoreAccessWithGapsRequiresMasking)
5036     return true;
5037 
5038   // If masked interleaving is required, we expect that the user/target had
5039   // enabled it, because otherwise it either wouldn't have been created or
5040   // it should have been invalidated by the CostModel.
5041   assert(useMaskedInterleavedAccesses(TTI) &&
5042          "Masked interleave-groups for predicated accesses are not enabled.");
5043 
5044   if (Group->isReverse())
5045     return false;
5046 
5047   auto *Ty = getLoadStoreType(I);
5048   const Align Alignment = getLoadStoreAlignment(I);
5049   return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, Alignment)
5050                           : TTI.isLegalMaskedStore(Ty, Alignment);
5051 }
5052 
5053 bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(
5054     Instruction *I, ElementCount VF) {
5055   // Get and ensure we have a valid memory instruction.
5056   assert((isa<LoadInst, StoreInst>(I)) && "Invalid memory instruction");
5057 
5058   auto *Ptr = getLoadStorePointerOperand(I);
5059   auto *ScalarTy = getLoadStoreType(I);
5060 
5061   // In order to be widened, first of all the pointer must be consecutive.
5062   if (!Legal->isConsecutivePtr(ScalarTy, Ptr))
5063     return false;
5064 
5065   // If the instruction is a store located in a predicated block, it will be
5066   // scalarized.
5067   if (isScalarWithPredication(I))
5068     return false;
5069 
5070   // If the instruction's allocated size doesn't equal its type size, it
5071   // requires padding and will be scalarized.
5072   auto &DL = I->getModule()->getDataLayout();
5073   if (hasIrregularType(ScalarTy, DL))
5074     return false;
5075 
5076   return true;
5077 }
5078 
5079 void LoopVectorizationCostModel::collectLoopUniforms(ElementCount VF) {
5080   // We should not collect Uniforms more than once per VF. Right now,
5081   // this function is called from collectUniformsAndScalars(), which
5082   // already does this check. Collecting Uniforms for VF=1 does not make any
5083   // sense.
5084 
5085   assert(VF.isVector() && Uniforms.find(VF) == Uniforms.end() &&
5086          "This function should not be visited twice for the same VF");
5087 
5088   // Visit the list of Uniforms. Even if we do not find any uniform value,
5089   // we will not analyze again: Uniforms.count(VF) will return 1.
5090   Uniforms[VF].clear();
5091 
5092   // We now know that the loop is vectorizable!
5093   // Collect instructions inside the loop that will remain uniform after
5094   // vectorization.
5095 
5096   // Global values, params and instructions outside of the current loop are
5097   // out of scope.
5098   auto isOutOfScope = [&](Value *V) -> bool {
5099     Instruction *I = dyn_cast<Instruction>(V);
5100     return (!I || !TheLoop->contains(I));
5101   };
5102 
5103   // Worklist containing uniform instructions demanding lane 0.
5104   SetVector<Instruction *> Worklist;
5105   BasicBlock *Latch = TheLoop->getLoopLatch();
5106 
5107   // Add uniform instructions demanding lane 0 to the worklist. Instructions
5108   // that are scalar with predication must not be considered uniform after
5109   // vectorization, because that would create an erroneous replicating region
5110   // where only a single instance out of VF should be formed.
5111   // TODO: optimize such seldom cases if found important, see PR40816.
5112   auto addToWorklistIfAllowed = [&](Instruction *I) -> void {
5113     if (isOutOfScope(I)) {
5114       LLVM_DEBUG(dbgs() << "LV: Found not uniform due to scope: "
5115                         << *I << "\n");
5116       return;
5117     }
5118     if (isScalarWithPredication(I)) {
5119       LLVM_DEBUG(dbgs() << "LV: Found not uniform being ScalarWithPredication: "
5120                         << *I << "\n");
5121       return;
5122     }
5123     LLVM_DEBUG(dbgs() << "LV: Found uniform instruction: " << *I << "\n");
5124     Worklist.insert(I);
5125   };
5126 
5127   // Start with the conditional branch. If the branch condition is an
5128   // instruction contained in the loop that is only used by the branch, it is
5129   // uniform.
5130   auto *Cmp = dyn_cast<Instruction>(Latch->getTerminator()->getOperand(0));
5131   if (Cmp && TheLoop->contains(Cmp) && Cmp->hasOneUse())
5132     addToWorklistIfAllowed(Cmp);
5133 
5134   auto isUniformDecision = [&](Instruction *I, ElementCount VF) {
5135     InstWidening WideningDecision = getWideningDecision(I, VF);
5136     assert(WideningDecision != CM_Unknown &&
5137            "Widening decision should be ready at this moment");
5138 
5139     // A uniform memory op is itself uniform.  We exclude uniform stores
5140     // here as they demand the last lane, not the first one.
5141     if (isa<LoadInst>(I) && Legal->isUniformMemOp(*I)) {
5142       assert(WideningDecision == CM_Scalarize);
5143       return true;
5144     }
5145 
5146     return (WideningDecision == CM_Widen ||
5147             WideningDecision == CM_Widen_Reverse ||
5148             WideningDecision == CM_Interleave);
5149   };
5150 
5151 
5152   // Returns true if Ptr is the pointer operand of a memory access instruction
5153   // I, and I is known to not require scalarization.
5154   auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
5155     return getLoadStorePointerOperand(I) == Ptr && isUniformDecision(I, VF);
5156   };
5157 
5158   // Holds a list of values which are known to have at least one uniform use.
5159   // Note that there may be other uses which aren't uniform.  A "uniform use"
5160   // here is something which only demands lane 0 of the unrolled iterations;
5161   // it does not imply that all lanes produce the same value (e.g. this is not
5162   // the usual meaning of uniform)
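       // For example (illustrative), the address operand of a widened
       // consecutive load has a uniform use: only its lane-0 value is needed
       // to form the vector load, even though the loaded lanes differ.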
5163   SetVector<Value *> HasUniformUse;
5164 
5165   // Scan the loop for instructions which are either a) known to have only
5166   // lane 0 demanded or b) are uses which demand only lane 0 of their operand.
5167   for (auto *BB : TheLoop->blocks())
5168     for (auto &I : *BB) {
5169       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I)) {
5170         switch (II->getIntrinsicID()) {
5171         case Intrinsic::sideeffect:
5172         case Intrinsic::experimental_noalias_scope_decl:
5173         case Intrinsic::assume:
5174         case Intrinsic::lifetime_start:
5175         case Intrinsic::lifetime_end:
5176           if (TheLoop->hasLoopInvariantOperands(&I))
5177             addToWorklistIfAllowed(&I);
5178           break;
5179         default:
5180           break;
5181         }
5182       }
5183 
5184       // ExtractValue instructions must be uniform, because the operands are
5185       // known to be loop-invariant.
5186       if (auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
5187         assert(isOutOfScope(EVI->getAggregateOperand()) &&
5188                "Expected aggregate value to be loop invariant");
5189         addToWorklistIfAllowed(EVI);
5190         continue;
5191       }
5192 
5193       // If there's no pointer operand, there's nothing to do.
5194       auto *Ptr = getLoadStorePointerOperand(&I);
5195       if (!Ptr)
5196         continue;
5197 
5198       // A uniform memory op is itself uniform.  We exclude uniform stores
5199       // here as they demand the last lane, not the first one.
5200       if (isa<LoadInst>(I) && Legal->isUniformMemOp(I))
5201         addToWorklistIfAllowed(&I);
5202 
5203       if (isUniformDecision(&I, VF)) {
5204         assert(isVectorizedMemAccessUse(&I, Ptr) && "consistency check");
5205         HasUniformUse.insert(Ptr);
5206       }
5207     }
5208 
5209   // Add to the worklist any operands which have *only* uniform (e.g. lane 0
5210   // demanding) users.  Since loops are assumed to be in LCSSA form, this
5211   // disallows uses outside the loop as well.
5212   for (auto *V : HasUniformUse) {
5213     if (isOutOfScope(V))
5214       continue;
5215     auto *I = cast<Instruction>(V);
5216     auto UsersAreMemAccesses =
5217       llvm::all_of(I->users(), [&](User *U) -> bool {
5218         return isVectorizedMemAccessUse(cast<Instruction>(U), V);
5219       });
5220     if (UsersAreMemAccesses)
5221       addToWorklistIfAllowed(I);
5222   }
5223 
5224   // Expand Worklist in topological order: whenever a new instruction
5225   // is added, its users should already be inside Worklist. This ensures
5226   // a uniform instruction will only be used by uniform instructions.
5227   unsigned idx = 0;
5228   while (idx != Worklist.size()) {
5229     Instruction *I = Worklist[idx++];
5230 
5231     for (auto OV : I->operand_values()) {
5232       // isOutOfScope operands cannot be uniform instructions.
5233       if (isOutOfScope(OV))
5234         continue;
5235       // First-order recurrence phis should typically be considered
5236       // non-uniform.
5237       auto *OP = dyn_cast<PHINode>(OV);
5238       if (OP && Legal->isFirstOrderRecurrence(OP))
5239         continue;
5240       // If all the users of the operand are uniform, then add the
5241       // operand into the uniform worklist.
5242       auto *OI = cast<Instruction>(OV);
5243       if (llvm::all_of(OI->users(), [&](User *U) -> bool {
5244             auto *J = cast<Instruction>(U);
5245             return Worklist.count(J) || isVectorizedMemAccessUse(J, OI);
5246           }))
5247         addToWorklistIfAllowed(OI);
5248     }
5249   }
5250 
5251   // For an instruction to be added into Worklist above, all its users inside
5252   // the loop should also be in Worklist. However, this condition cannot be
5253   // true for phi nodes that form a cyclic dependence. We must process phi
5254   // nodes separately. An induction variable will remain uniform if all users
5255   // of the induction variable and induction variable update remain uniform.
5256   // The code below handles both pointer and non-pointer induction variables.
5257   for (auto &Induction : Legal->getInductionVars()) {
5258     auto *Ind = Induction.first;
5259     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
5260 
5261     // Determine if all users of the induction variable are uniform after
5262     // vectorization.
5263     auto UniformInd = llvm::all_of(Ind->users(), [&](User *U) -> bool {
5264       auto *I = cast<Instruction>(U);
5265       return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
5266              isVectorizedMemAccessUse(I, Ind);
5267     });
5268     if (!UniformInd)
5269       continue;
5270 
5271     // Determine if all users of the induction variable update instruction are
5272     // uniform after vectorization.
5273     auto UniformIndUpdate =
5274         llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
5275           auto *I = cast<Instruction>(U);
5276           return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
5277                  isVectorizedMemAccessUse(I, IndUpdate);
5278         });
5279     if (!UniformIndUpdate)
5280       continue;
5281 
5282     // The induction variable and its update instruction will remain uniform.
5283     addToWorklistIfAllowed(Ind);
5284     addToWorklistIfAllowed(IndUpdate);
5285   }
5286 
5287   Uniforms[VF].insert(Worklist.begin(), Worklist.end());
5288 }
5289 
5290 bool LoopVectorizationCostModel::runtimeChecksRequired() {
5291   LLVM_DEBUG(dbgs() << "LV: Performing code size checks.\n");
5292 
5293   if (Legal->getRuntimePointerChecking()->Need) {
5294     reportVectorizationFailure("Runtime ptr check is required with -Os/-Oz",
5295         "runtime pointer checks needed. Enable vectorization of this "
5296         "loop with '#pragma clang loop vectorize(enable)' when "
5297         "compiling with -Os/-Oz",
5298         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5299     return true;
5300   }
5301 
5302   if (!PSE.getUnionPredicate().getPredicates().empty()) {
5303     reportVectorizationFailure("Runtime SCEV check is required with -Os/-Oz",
5304         "runtime SCEV checks needed. Enable vectorization of this "
5305         "loop with '#pragma clang loop vectorize(enable)' when "
5306         "compiling with -Os/-Oz",
5307         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5308     return true;
5309   }
5310 
5311   // FIXME: Avoid specializing for stride==1 instead of bailing out.
5312   if (!Legal->getLAI()->getSymbolicStrides().empty()) {
5313     reportVectorizationFailure("Runtime stride check for small trip count",
5314         "runtime stride == 1 checks needed. Enable vectorization of "
5315         "this loop without such check by compiling with -Os/-Oz",
5316         "CantVersionLoopWithOptForSize", ORE, TheLoop);
5317     return true;
5318   }
5319 
5320   return false;
5321 }
5322 
5323 ElementCount
5324 LoopVectorizationCostModel::getMaxLegalScalableVF(unsigned MaxSafeElements) {
5325   if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors)
5326     return ElementCount::getScalable(0);
5327 
5328   if (Hints->isScalableVectorizationDisabled()) {
5329     reportVectorizationInfo("Scalable vectorization is explicitly disabled",
5330                             "ScalableVectorizationDisabled", ORE, TheLoop);
5331     return ElementCount::getScalable(0);
5332   }
5333 
5334   LLVM_DEBUG(dbgs() << "LV: Scalable vectorization is available\n");
5335 
5336   auto MaxScalableVF = ElementCount::getScalable(
5337       std::numeric_limits<ElementCount::ScalarTy>::max());
5338 
5339   // Test that the loop-vectorizer can legalize all operations for this MaxVF.
5340   // FIXME: While for scalable vectors this is currently sufficient, this should
5341   // be replaced by a more detailed mechanism that filters out specific VFs,
5342   // instead of invalidating vectorization for a whole set of VFs based on the
5343   // MaxVF.
5344 
5345   // Disable scalable vectorization if the loop contains unsupported reductions.
5346   if (!canVectorizeReductions(MaxScalableVF)) {
5347     reportVectorizationInfo(
5348         "Scalable vectorization not supported for the reduction "
5349         "operations found in this loop.",
5350         "ScalableVFUnfeasible", ORE, TheLoop);
5351     return ElementCount::getScalable(0);
5352   }
5353 
5354   // Disable scalable vectorization if the loop contains any instructions
5355   // with element types not supported for scalable vectors.
5356   if (any_of(ElementTypesInLoop, [&](Type *Ty) {
5357         return !Ty->isVoidTy() &&
5358                !this->TTI.isElementTypeLegalForScalableVector(Ty);
5359       })) {
5360     reportVectorizationInfo("Scalable vectorization is not supported "
5361                             "for all element types found in this loop.",
5362                             "ScalableVFUnfeasible", ORE, TheLoop);
5363     return ElementCount::getScalable(0);
5364   }
5365 
5366   if (Legal->isSafeForAnyVectorWidth())
5367     return MaxScalableVF;
5368 
5369   // Limit MaxScalableVF by the maximum safe dependence distance.
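       // For example (illustrative), with MaxSafeElements = 32 and a maximum
       // vscale of 16, the largest safe scalable VF is vscale x 2.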
5370   Optional<unsigned> MaxVScale = TTI.getMaxVScale();
5371   if (!MaxVScale && TheFunction->hasFnAttribute(Attribute::VScaleRange)) {
5372     unsigned VScaleMax = TheFunction->getFnAttribute(Attribute::VScaleRange)
5373                              .getVScaleRangeArgs()
5374                              .second;
5375     if (VScaleMax > 0)
5376       MaxVScale = VScaleMax;
5377   }
5378   MaxScalableVF = ElementCount::getScalable(
5379       MaxVScale ? (MaxSafeElements / MaxVScale.getValue()) : 0);
5380   if (!MaxScalableVF)
5381     reportVectorizationInfo(
5382         "Max legal vector width too small, scalable vectorization "
5383         "unfeasible.",
5384         "ScalableVFUnfeasible", ORE, TheLoop);
5385 
5386   return MaxScalableVF;
5387 }
5388 
5389 FixedScalableVFPair
5390 LoopVectorizationCostModel::computeFeasibleMaxVF(unsigned ConstTripCount,
5391                                                  ElementCount UserVF) {
5392   MinBWs = computeMinimumValueSizes(TheLoop->getBlocks(), *DB, &TTI);
5393   unsigned SmallestType, WidestType;
5394   std::tie(SmallestType, WidestType) = getSmallestAndWidestTypes();
5395 
5396   // Get the maximum safe dependence distance in bits computed by LAA.
5397   // It is computed by MaxVF * sizeOf(type) * 8, where type is taken from
5398   // the memory accesses that is most restrictive (involved in the smallest
5399   // dependence distance).
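       // For example (illustrative), a maximum safe width of 256 bits and a
       // widest element type of 32 bits give MaxSafeElements =
       // PowerOf2Floor(256 / 32) = 8.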
5400   unsigned MaxSafeElements =
5401       PowerOf2Floor(Legal->getMaxSafeVectorWidthInBits() / WidestType);
5402 
5403   auto MaxSafeFixedVF = ElementCount::getFixed(MaxSafeElements);
5404   auto MaxSafeScalableVF = getMaxLegalScalableVF(MaxSafeElements);
5405 
5406   LLVM_DEBUG(dbgs() << "LV: The max safe fixed VF is: " << MaxSafeFixedVF
5407                     << ".\n");
5408   LLVM_DEBUG(dbgs() << "LV: The max safe scalable VF is: " << MaxSafeScalableVF
5409                     << ".\n");
5410 
5411   // First analyze the UserVF, fall back if the UserVF should be ignored.
5412   if (UserVF) {
5413     auto MaxSafeUserVF =
5414         UserVF.isScalable() ? MaxSafeScalableVF : MaxSafeFixedVF;
5415 
5416     if (ElementCount::isKnownLE(UserVF, MaxSafeUserVF)) {
5417       // If `VF=vscale x N` is safe, then so is `VF=N`
5418       if (UserVF.isScalable())
5419         return FixedScalableVFPair(
5420             ElementCount::getFixed(UserVF.getKnownMinValue()), UserVF);
5421       else
5422         return UserVF;
5423     }
5424 
5425     assert(ElementCount::isKnownGT(UserVF, MaxSafeUserVF));
5426 
5427     // Only clamp if the UserVF is not scalable. If the UserVF is scalable, it
5428     // is better to ignore the hint and let the compiler choose a suitable VF.
5429     if (!UserVF.isScalable()) {
5430       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5431                         << " is unsafe, clamping to max safe VF="
5432                         << MaxSafeFixedVF << ".\n");
5433       ORE->emit([&]() {
5434         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5435                                           TheLoop->getStartLoc(),
5436                                           TheLoop->getHeader())
5437                << "User-specified vectorization factor "
5438                << ore::NV("UserVectorizationFactor", UserVF)
5439                << " is unsafe, clamping to maximum safe vectorization factor "
5440                << ore::NV("VectorizationFactor", MaxSafeFixedVF);
5441       });
5442       return MaxSafeFixedVF;
5443     }
5444 
5445     if (!TTI.supportsScalableVectors() && !ForceTargetSupportsScalableVectors) {
5446       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5447                         << " is ignored because scalable vectors are not "
5448                            "available.\n");
5449       ORE->emit([&]() {
5450         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5451                                           TheLoop->getStartLoc(),
5452                                           TheLoop->getHeader())
5453                << "User-specified vectorization factor "
5454                << ore::NV("UserVectorizationFactor", UserVF)
5455                << " is ignored because the target does not support scalable "
5456                   "vectors. The compiler will pick a more suitable value.";
5457       });
5458     } else {
5459       LLVM_DEBUG(dbgs() << "LV: User VF=" << UserVF
5460                         << " is unsafe. Ignoring scalable UserVF.\n");
5461       ORE->emit([&]() {
5462         return OptimizationRemarkAnalysis(DEBUG_TYPE, "VectorizationFactor",
5463                                           TheLoop->getStartLoc(),
5464                                           TheLoop->getHeader())
5465                << "User-specified vectorization factor "
5466                << ore::NV("UserVectorizationFactor", UserVF)
5467                << " is unsafe. Ignoring the hint to let the compiler pick a "
5468                   "more suitable value.";
5469       });
5470     }
5471   }
5472 
5473   LLVM_DEBUG(dbgs() << "LV: The Smallest and Widest types: " << SmallestType
5474                     << " / " << WidestType << " bits.\n");
5475 
5476   FixedScalableVFPair Result(ElementCount::getFixed(1),
5477                              ElementCount::getScalable(0));
5478   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5479                                            WidestType, MaxSafeFixedVF))
5480     Result.FixedVF = MaxVF;
5481 
5482   if (auto MaxVF = getMaximizedVFForTarget(ConstTripCount, SmallestType,
5483                                            WidestType, MaxSafeScalableVF))
5484     if (MaxVF.isScalable()) {
5485       Result.ScalableVF = MaxVF;
5486       LLVM_DEBUG(dbgs() << "LV: Found feasible scalable VF = " << MaxVF
5487                         << "\n");
5488     }
5489 
5490   return Result;
5491 }
5492 
5493 FixedScalableVFPair
5494 LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) {
5495   if (Legal->getRuntimePointerChecking()->Need && TTI.hasBranchDivergence()) {
5496     // TODO: It may be useful to do since it's still likely to be dynamically
5497     // uniform if the target can skip.
5498     reportVectorizationFailure(
5499         "Not inserting runtime ptr check for divergent target",
5500         "runtime pointer checks needed. Not enabled for divergent target",
5501         "CantVersionLoopWithDivergentTarget", ORE, TheLoop);
5502     return FixedScalableVFPair::getNone();
5503   }
5504 
5505   unsigned TC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
5506   LLVM_DEBUG(dbgs() << "LV: Found trip count: " << TC << '\n');
5507   if (TC == 1) {
5508     reportVectorizationFailure("Single iteration (non) loop",
5509         "loop trip count is one, irrelevant for vectorization",
5510         "SingleIterationLoop", ORE, TheLoop);
5511     return FixedScalableVFPair::getNone();
5512   }
5513 
5514   switch (ScalarEpilogueStatus) {
5515   case CM_ScalarEpilogueAllowed:
5516     return computeFeasibleMaxVF(TC, UserVF);
5517   case CM_ScalarEpilogueNotAllowedUsePredicate:
5518     LLVM_FALLTHROUGH;
5519   case CM_ScalarEpilogueNotNeededUsePredicate:
5520     LLVM_DEBUG(
5521         dbgs() << "LV: vector predicate hint/switch found.\n"
5522                << "LV: Not allowing scalar epilogue, creating predicated "
5523                << "vector loop.\n");
5524     break;
5525   case CM_ScalarEpilogueNotAllowedLowTripLoop:
5526     // fallthrough as a special case of OptForSize
5527   case CM_ScalarEpilogueNotAllowedOptSize:
5528     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedOptSize)
5529       LLVM_DEBUG(
5530           dbgs() << "LV: Not allowing scalar epilogue due to -Os/-Oz.\n");
5531     else
5532       LLVM_DEBUG(dbgs() << "LV: Not allowing scalar epilogue due to low trip "
5533                         << "count.\n");
5534 
5535     // Bail if runtime checks are required, which are not good when optimising
5536     // for size.
5537     if (runtimeChecksRequired())
5538       return FixedScalableVFPair::getNone();
5539 
5540     break;
5541   }
5542 
5543   // The only loops we can vectorize without a scalar epilogue are loops with
5544   // a bottom-test and a single exiting block. We'd have to handle the fact
5545   // that not every instruction executes on the last iteration.  This will
5546   // require a lane mask which varies through the vector loop body.  (TODO)
5547   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
5548     // If there was a tail-folding hint/switch, but we can't fold the tail by
5549     // masking, fallback to a vectorization with a scalar epilogue.
5550     if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5551       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5552                            "scalar epilogue instead.\n");
5553       ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5554       return computeFeasibleMaxVF(TC, UserVF);
5555     }
5556     return FixedScalableVFPair::getNone();
5557   }
5558 
5559   // Now try the tail folding
5560 
5561   // Invalidate interleave groups that require an epilogue if we can't mask
5562   // the interleave-group.
5563   if (!useMaskedInterleavedAccesses(TTI)) {
5564     assert(WideningDecisions.empty() && Uniforms.empty() && Scalars.empty() &&
5565            "No decisions should have been taken at this point");
5566     // Note: There is no need to invalidate any cost modeling decisions here,
5567     // as none were taken so far.
5568     InterleaveInfo.invalidateGroupsRequiringScalarEpilogue();
5569   }
5570 
5571   FixedScalableVFPair MaxFactors = computeFeasibleMaxVF(TC, UserVF);
5572   // Avoid tail folding if the trip count is known to be a multiple of any VF
5573   // we chose.
5574   // FIXME: The condition below pessimises the case for fixed-width vectors,
5575   // when scalable VFs are also candidates for vectorization.
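       // For example (illustrative), with a known trip count of 128, a
       // MaxFixedVF of 8 and a UserIC of 2, 128 urem (8 * 2) == 0, so no tail
       // remains and tail folding can be skipped.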
5576   if (MaxFactors.FixedVF.isVector() && !MaxFactors.ScalableVF) {
5577     ElementCount MaxFixedVF = MaxFactors.FixedVF;
5578     assert((UserVF.isNonZero() || isPowerOf2_32(MaxFixedVF.getFixedValue())) &&
5579            "MaxFixedVF must be a power of 2");
5580     unsigned MaxVFtimesIC = UserIC ? MaxFixedVF.getFixedValue() * UserIC
5581                                    : MaxFixedVF.getFixedValue();
5582     ScalarEvolution *SE = PSE.getSE();
5583     const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
5584     const SCEV *ExitCount = SE->getAddExpr(
5585         BackedgeTakenCount, SE->getOne(BackedgeTakenCount->getType()));
5586     const SCEV *Rem = SE->getURemExpr(
5587         SE->applyLoopGuards(ExitCount, TheLoop),
5588         SE->getConstant(BackedgeTakenCount->getType(), MaxVFtimesIC));
5589     if (Rem->isZero()) {
5590       // Accept MaxFixedVF if we do not have a tail.
5591       LLVM_DEBUG(dbgs() << "LV: No tail will remain for any chosen VF.\n");
5592       return MaxFactors;
5593     }
5594   }
5595 
5596   // For scalable vectors, don't use tail folding as this is not yet
5597   // supported. The code is likely to have ended up here if the trip count is
5598   // low, in which case it makes sense not to use scalable vectors.
5599   if (MaxFactors.ScalableVF.isVector())
5600     MaxFactors.ScalableVF = ElementCount::getScalable(0);
5601 
5602   // If we don't know the precise trip count, or if the trip count that we
5603   // found modulo the vectorization factor is not zero, try to fold the tail
5604   // by masking.
5605   // FIXME: look for a smaller MaxVF that does divide TC rather than masking.
5606   if (Legal->prepareToFoldTailByMasking()) {
5607     FoldTailByMasking = true;
5608     return MaxFactors;
5609   }
5610 
5611   // If there was a tail-folding hint/switch, but we can't fold the tail by
5612   // masking, fallback to a vectorization with a scalar epilogue.
5613   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotNeededUsePredicate) {
5614     LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking: vectorize with a "
5615                          "scalar epilogue instead.\n");
5616     ScalarEpilogueStatus = CM_ScalarEpilogueAllowed;
5617     return MaxFactors;
5618   }
5619 
5620   if (ScalarEpilogueStatus == CM_ScalarEpilogueNotAllowedUsePredicate) {
5621     LLVM_DEBUG(dbgs() << "LV: Can't fold tail by masking: don't vectorize\n");
5622     return FixedScalableVFPair::getNone();
5623   }
5624 
5625   if (TC == 0) {
5626     reportVectorizationFailure(
5627         "Unable to calculate the loop count due to complex control flow",
5628         "unable to calculate the loop count due to complex control flow",
5629         "UnknownLoopCountComplexCFG", ORE, TheLoop);
5630     return FixedScalableVFPair::getNone();
5631   }
5632 
5633   reportVectorizationFailure(
5634       "Cannot optimize for size and vectorize at the same time.",
5635       "cannot optimize for size and vectorize at the same time. "
5636       "Enable vectorization of this loop with '#pragma clang loop "
5637       "vectorize(enable)' when compiling with -Os/-Oz",
5638       "NoTailLoopWithOptForSize", ORE, TheLoop);
5639   return FixedScalableVFPair::getNone();
5640 }
5641 
5642 ElementCount LoopVectorizationCostModel::getMaximizedVFForTarget(
5643     unsigned ConstTripCount, unsigned SmallestType, unsigned WidestType,
5644     const ElementCount &MaxSafeVF) {
5645   bool ComputeScalableMaxVF = MaxSafeVF.isScalable();
5646   TypeSize WidestRegister = TTI.getRegisterBitWidth(
5647       ComputeScalableMaxVF ? TargetTransformInfo::RGK_ScalableVector
5648                            : TargetTransformInfo::RGK_FixedWidthVector);
5649 
5650   // Convenience function to return the minimum of two ElementCounts.
5651   auto MinVF = [](const ElementCount &LHS, const ElementCount &RHS) {
5652     assert((LHS.isScalable() == RHS.isScalable()) &&
5653            "Scalable flags must match");
5654     return ElementCount::isKnownLT(LHS, RHS) ? LHS : RHS;
5655   };
5656 
5657   // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
5658   // Note that both WidestRegister and WidestType may not be powers of 2.
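       // For example (illustrative), 128-bit vector registers with a widest
       // scalar type of 32 bits give MaxVectorElementCount =
       // PowerOf2Floor(128 / 32) = 4 lanes.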
5659   auto MaxVectorElementCount = ElementCount::get(
5660       PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
5661       ComputeScalableMaxVF);
5662   MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
5663   LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
5664                     << (MaxVectorElementCount * WidestType) << " bits.\n");
5665 
5666   if (!MaxVectorElementCount) {
5667     LLVM_DEBUG(dbgs() << "LV: The target has no "
5668                       << (ComputeScalableMaxVF ? "scalable" : "fixed")
5669                       << " vector registers.\n");
5670     return ElementCount::getFixed(1);
5671   }
5672 
5673   const auto TripCountEC = ElementCount::getFixed(ConstTripCount);
5674   if (ConstTripCount &&
5675       ElementCount::isKnownLE(TripCountEC, MaxVectorElementCount) &&
5676       isPowerOf2_32(ConstTripCount)) {
5677     // We need to clamp the VF to be the ConstTripCount. There is no point in
5678     // choosing a higher viable VF as done in the loop below. If
5679     // MaxVectorElementCount is scalable, we only fall back on a fixed VF when
5680     // the TC is less than or equal to the known number of lanes.
5681     LLVM_DEBUG(dbgs() << "LV: Clamping the MaxVF to the constant trip count: "
5682                       << ConstTripCount << "\n");
5683     return TripCountEC;
5684   }
5685 
5686   ElementCount MaxVF = MaxVectorElementCount;
5687   if (TTI.shouldMaximizeVectorBandwidth() ||
5688       (MaximizeBandwidth && isScalarEpilogueAllowed())) {
5689     auto MaxVectorElementCountMaxBW = ElementCount::get(
5690         PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
5691         ComputeScalableMaxVF);
5692     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
5693 
5694     // Collect all viable vectorization factors larger than the default MaxVF
5695     // (i.e. MaxVectorElementCount).
5696     SmallVector<ElementCount, 8> VFs;
5697     for (ElementCount VS = MaxVectorElementCount * 2;
5698          ElementCount::isKnownLE(VS, MaxVectorElementCountMaxBW); VS *= 2)
5699       VFs.push_back(VS);
5700 
5701     // For each VF calculate its register usage.
5702     auto RUs = calculateRegisterUsage(VFs);
5703 
5704     // Select the largest VF which doesn't require more registers than existing
5705     // ones.
5706     for (int i = RUs.size() - 1; i >= 0; --i) {
5707       bool Selected = true;
5708       for (auto &pair : RUs[i].MaxLocalUsers) {
5709         unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
5710         if (pair.second > TargetNumRegisters)
5711           Selected = false;
5712       }
5713       if (Selected) {
5714         MaxVF = VFs[i];
5715         break;
5716       }
5717     }
5718     if (ElementCount MinVF =
5719             TTI.getMinimumVF(SmallestType, ComputeScalableMaxVF)) {
5720       if (ElementCount::isKnownLT(MaxVF, MinVF)) {
5721         LLVM_DEBUG(dbgs() << "LV: Overriding calculated MaxVF(" << MaxVF
5722                           << ") with target's minimum: " << MinVF << '\n');
5723         MaxVF = MinVF;
5724       }
5725     }
5726   }
5727   return MaxVF;
5728 }
5729 
5730 bool LoopVectorizationCostModel::isMoreProfitable(
5731     const VectorizationFactor &A, const VectorizationFactor &B) const {
5732   InstructionCost CostA = A.Cost;
5733   InstructionCost CostB = B.Cost;
5734 
5735   unsigned MaxTripCount = PSE.getSE()->getSmallConstantMaxTripCount(TheLoop);
5736 
5737   if (!A.Width.isScalable() && !B.Width.isScalable() && FoldTailByMasking &&
5738       MaxTripCount) {
5739     // If we are folding the tail and the trip count is a known (possibly small)
5740     // constant, the trip count will be rounded up to an integer number of
5741     // iterations. The total cost will be PerIterationCost*ceil(TripCount/VF),
5742     // which we compare directly. When not folding the tail, the total cost will
5743     // be PerIterationCost*floor(TC/VF) + Scalar remainder cost, and so is
5744     // approximated with the per-lane cost below instead of using the tripcount
5745     // as here.
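         // For example (illustrative), with MaxTripCount = 10 a VF=4 plan of
         // cost 8 yields 8 * ceil(10/4) = 24, while a VF=8 plan of cost 14
         // yields 14 * ceil(10/8) = 28, so the VF=4 plan is preferred here.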
5746     auto RTCostA = CostA * divideCeil(MaxTripCount, A.Width.getFixedValue());
5747     auto RTCostB = CostB * divideCeil(MaxTripCount, B.Width.getFixedValue());
5748     return RTCostA < RTCostB;
5749   }
5750 
5751   // Improve estimate for the vector width if it is scalable.
5752   unsigned EstimatedWidthA = A.Width.getKnownMinValue();
5753   unsigned EstimatedWidthB = B.Width.getKnownMinValue();
5754   if (Optional<unsigned> VScale = TTI.getVScaleForTuning()) {
5755     if (A.Width.isScalable())
5756       EstimatedWidthA *= VScale.getValue();
5757     if (B.Width.isScalable())
5758       EstimatedWidthB *= VScale.getValue();
5759   }
5760 
5761   // When set to preferred, for now assume vscale may be larger than 1 (or the
5762   // one being tuned for), so that scalable vectorization is slightly favorable
5763   // over fixed-width vectorization.
5764   if (Hints->isScalableVectorizationPreferred())
5765     if (A.Width.isScalable() && !B.Width.isScalable())
5766       return (CostA * B.Width.getFixedValue()) <= (CostB * EstimatedWidthA);
5767 
5768   // To avoid the need for FP division:
5769   //      (CostA / A.Width) < (CostB / B.Width)
5770   // <=>  (CostA * B.Width) < (CostB * A.Width)
5771   return (CostA * EstimatedWidthB) < (CostB * EstimatedWidthA);
5772 }
5773 
5774 VectorizationFactor LoopVectorizationCostModel::selectVectorizationFactor(
5775     const ElementCountSet &VFCandidates) {
5776   InstructionCost ExpectedCost = expectedCost(ElementCount::getFixed(1)).first;
5777   LLVM_DEBUG(dbgs() << "LV: Scalar loop costs: " << ExpectedCost << ".\n");
5778   assert(ExpectedCost.isValid() && "Unexpected invalid cost for scalar loop");
5779   assert(VFCandidates.count(ElementCount::getFixed(1)) &&
5780          "Expected Scalar VF to be a candidate");
5781 
5782   const VectorizationFactor ScalarCost(ElementCount::getFixed(1), ExpectedCost);
5783   VectorizationFactor ChosenFactor = ScalarCost;
5784 
5785   bool ForceVectorization = Hints->getForce() == LoopVectorizeHints::FK_Enabled;
5786   if (ForceVectorization && VFCandidates.size() > 1) {
5787     // Ignore scalar width, because the user explicitly wants vectorization.
5788     // Initialize cost to max so that VF = 2 is, at least, chosen during cost
5789     // evaluation.
5790     ChosenFactor.Cost = InstructionCost::getMax();
5791   }
5792 
5793   SmallVector<InstructionVFPair> InvalidCosts;
5794   for (const auto &i : VFCandidates) {
5795     // The cost for scalar VF=1 is already calculated, so ignore it.
5796     if (i.isScalar())
5797       continue;
5798 
5799     VectorizationCostTy C = expectedCost(i, &InvalidCosts);
5800     VectorizationFactor Candidate(i, C.first);
5801 
5802 #ifndef NDEBUG
5803     unsigned AssumedMinimumVscale = 1;
5804     if (Optional<unsigned> VScale = TTI.getVScaleForTuning())
5805       AssumedMinimumVscale = VScale.getValue();
5806     unsigned Width =
5807         Candidate.Width.isScalable()
5808             ? Candidate.Width.getKnownMinValue() * AssumedMinimumVscale
5809             : Candidate.Width.getFixedValue();
5810     LLVM_DEBUG(dbgs() << "LV: Vector loop of width " << i
5811                       << " costs: " << (Candidate.Cost / Width));
5812     if (i.isScalable())
5813       LLVM_DEBUG(dbgs() << " (assuming a minimum vscale of "
5814                         << AssumedMinimumVscale << ")");
5815     LLVM_DEBUG(dbgs() << ".\n");
5816 #endif
5817 
5818     if (!C.second && !ForceVectorization) {
5819       LLVM_DEBUG(
5820           dbgs() << "LV: Not considering vector loop of width " << i
5821                  << " because it will not generate any vector instructions.\n");
5822       continue;
5823     }
5824 
5825     // If profitable add it to ProfitableVF list.
5826     if (isMoreProfitable(Candidate, ScalarCost))
5827       ProfitableVFs.push_back(Candidate);
5828 
5829     if (isMoreProfitable(Candidate, ChosenFactor))
5830       ChosenFactor = Candidate;
5831   }
5832 
5833   // Emit a report of VFs with invalid costs in the loop.
5834   if (!InvalidCosts.empty()) {
5835     // Group the remarks per instruction, keeping the instruction order from
5836     // InvalidCosts.
5837     std::map<Instruction *, unsigned> Numbering;
5838     unsigned I = 0;
5839     for (auto &Pair : InvalidCosts)
5840       if (!Numbering.count(Pair.first))
5841         Numbering[Pair.first] = I++;
5842 
5843     // Sort the list, first on instruction(number) then on VF.
5844     llvm::sort(InvalidCosts,
5845                [&Numbering](InstructionVFPair &A, InstructionVFPair &B) {
5846                  if (Numbering[A.first] != Numbering[B.first])
5847                    return Numbering[A.first] < Numbering[B.first];
5848                  ElementCountComparator ECC;
5849                  return ECC(A.second, B.second);
5850                });
5851 
5852     // For a list of ordered instruction-vf pairs:
5853     //   [(load, vf1), (load, vf2), (store, vf1)]
5854     // Group the instructions together to emit separate remarks for:
5855     //   load  (vf1, vf2)
5856     //   store (vf1)
5857     auto Tail = ArrayRef<InstructionVFPair>(InvalidCosts);
5858     auto Subset = ArrayRef<InstructionVFPair>();
5859     do {
5860       if (Subset.empty())
5861         Subset = Tail.take_front(1);
5862 
5863       Instruction *I = Subset.front().first;
5864 
5865       // If the next instruction is different, or if there are no other pairs,
5866       // emit a remark for the collated subset. e.g.
5867       //   [(load, vf1), (load, vf2)]
5868       // to emit:
5869       //  remark: invalid costs for 'load' at VF=(vf1, vf2)
5870       if (Subset == Tail || Tail[Subset.size()].first != I) {
5871         std::string OutString;
5872         raw_string_ostream OS(OutString);
5873         assert(!Subset.empty() && "Unexpected empty range");
5874         OS << "Instruction with invalid costs prevented vectorization at VF=(";
5875         for (auto &Pair : Subset)
5876           OS << (Pair.second == Subset.front().second ? "" : ", ")
5877              << Pair.second;
5878         OS << "):";
5879         if (auto *CI = dyn_cast<CallInst>(I))
5880           OS << " call to " << CI->getCalledFunction()->getName();
5881         else
5882           OS << " " << I->getOpcodeName();
5883         OS.flush();
5884         reportVectorizationInfo(OutString, "InvalidCost", ORE, TheLoop, I);
5885         Tail = Tail.drop_front(Subset.size());
5886         Subset = {};
5887       } else
5888         // Grow the subset by one element
5889         Subset = Tail.take_front(Subset.size() + 1);
5890     } while (!Tail.empty());
5891   }
5892 
5893   if (!EnableCondStoresVectorization && NumPredStores) {
5894     reportVectorizationFailure("There are conditional stores.",
5895         "store that is conditionally executed prevents vectorization",
5896         "ConditionalStore", ORE, TheLoop);
5897     ChosenFactor = ScalarCost;
5898   }
5899 
5900   LLVM_DEBUG(if (ForceVectorization && !ChosenFactor.Width.isScalar() &&
5901                  ChosenFactor.Cost >= ScalarCost.Cost) dbgs()
5902              << "LV: Vectorization seems to be not beneficial, "
5903              << "but was forced by a user.\n");
5904   LLVM_DEBUG(dbgs() << "LV: Selecting VF: " << ChosenFactor.Width << ".\n");
5905   return ChosenFactor;
5906 }
5907 
5908 bool LoopVectorizationCostModel::isCandidateForEpilogueVectorization(
5909     const Loop &L, ElementCount VF) const {
5910   // Cross iteration phis such as reductions need special handling and are
5911   // currently unsupported.
5912   if (any_of(L.getHeader()->phis(), [&](PHINode &Phi) {
5913         return Legal->isFirstOrderRecurrence(&Phi) ||
5914                Legal->isReductionVariable(&Phi);
5915       }))
5916     return false;
5917 
5918   // Phis with uses outside of the loop require special handling and are
5919   // currently unsupported.
5920   for (auto &Entry : Legal->getInductionVars()) {
5921     // Look for uses of the value of the induction at the last iteration.
5922     Value *PostInc = Entry.first->getIncomingValueForBlock(L.getLoopLatch());
5923     for (User *U : PostInc->users())
5924       if (!L.contains(cast<Instruction>(U)))
5925         return false;
5926     // Look for uses of penultimate value of the induction.
5927     for (User *U : Entry.first->users())
5928       if (!L.contains(cast<Instruction>(U)))
5929         return false;
5930   }
5931 
5932   // Induction variables that are widened require special handling that is
5933   // currently not supported.
5934   if (any_of(Legal->getInductionVars(), [&](auto &Entry) {
5935         return !(this->isScalarAfterVectorization(Entry.first, VF) ||
5936                  this->isProfitableToScalarize(Entry.first, VF));
5937       }))
5938     return false;
5939 
5940   // Epilogue vectorization code has not been audited to ensure it handles
5941   // non-latch exits properly. It may be fine, but it needs to be audited and
5942   // tested.
5943   if (L.getExitingBlock() != L.getLoopLatch())
5944     return false;
5945 
5946   return true;
5947 }
5948 
5949 bool LoopVectorizationCostModel::isEpilogueVectorizationProfitable(
5950     const ElementCount VF) const {
5951   // FIXME: We need a much better cost-model to take different parameters such
5952   // as register pressure, code size increase and cost of extra branches into
5953   // account. For now we apply a very crude heuristic and only consider loops
5954   // with vectorization factors larger than a certain value.
5955   // We also consider epilogue vectorization unprofitable for targets that don't
5956   // consider interleaving beneficial (e.g. MVE).
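       // Illustrative example (hypothetical numbers, not from the source): if
       // EpilogueVectorizationMinVF is at its usual default of 16, a main loop
       // vectorized with VF=16 or wider is considered worth an epilogue vector
       // loop, while one vectorized with VF=8 is not.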
5957   if (TTI.getMaxInterleaveFactor(VF.getKnownMinValue()) <= 1)
5958     return false;
5959   if (VF.getFixedValue() >= EpilogueVectorizationMinVF)
5960     return true;
5961   return false;
5962 }
5963 
5964 VectorizationFactor
5965 LoopVectorizationCostModel::selectEpilogueVectorizationFactor(
5966     const ElementCount MainLoopVF, const LoopVectorizationPlanner &LVP) {
5967   VectorizationFactor Result = VectorizationFactor::Disabled();
5968   if (!EnableEpilogueVectorization) {
5969     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is disabled.\n";);
5970     return Result;
5971   }
5972 
5973   if (!isScalarEpilogueAllowed()) {
5974     LLVM_DEBUG(
5975         dbgs() << "LEV: Unable to vectorize epilogue because no epilogue is "
5976                   "allowed.\n";);
5977     return Result;
5978   }
5979 
5980   // Not really a cost consideration, but check for unsupported cases here to
5981   // simplify the logic.
5982   if (!isCandidateForEpilogueVectorization(*TheLoop, MainLoopVF)) {
5983     LLVM_DEBUG(
5984         dbgs() << "LEV: Unable to vectorize epilogue because the loop is "
5985                   "not a supported candidate.\n";);
5986     return Result;
5987   }
5988 
5989   if (EpilogueVectorizationForceVF > 1) {
5990     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization factor is forced.\n";);
5991     ElementCount ForcedEC = ElementCount::getFixed(EpilogueVectorizationForceVF);
5992     if (LVP.hasPlanWithVF(ForcedEC))
5993       return {ForcedEC, 0};
5994     else {
5995       LLVM_DEBUG(
5996           dbgs()
5997               << "LEV: Epilogue vectorization forced factor is not viable.\n";);
5998       return Result;
5999     }
6000   }
6001 
6002   if (TheLoop->getHeader()->getParent()->hasOptSize() ||
6003       TheLoop->getHeader()->getParent()->hasMinSize()) {
6004     LLVM_DEBUG(
6005         dbgs()
6006             << "LEV: Epilogue vectorization skipped due to opt for size.\n";);
6007     return Result;
6008   }
6009 
6010   auto FixedMainLoopVF = ElementCount::getFixed(MainLoopVF.getKnownMinValue());
6011   if (MainLoopVF.isScalable())
6012     LLVM_DEBUG(
6013         dbgs() << "LEV: Epilogue vectorization using scalable vectors not "
6014                   "yet supported. Converting to fixed-width (VF="
6015                << FixedMainLoopVF << ") instead\n");
6016 
6017   if (!isEpilogueVectorizationProfitable(FixedMainLoopVF)) {
6018     LLVM_DEBUG(dbgs() << "LEV: Epilogue vectorization is not profitable for "
6019                          "this loop\n");
6020     return Result;
6021   }
6022 
6023   for (auto &NextVF : ProfitableVFs)
6024     if (ElementCount::isKnownLT(NextVF.Width, FixedMainLoopVF) &&
6025         (Result.Width.getFixedValue() == 1 ||
6026          isMoreProfitable(NextVF, Result)) &&
6027         LVP.hasPlanWithVF(NextVF.Width))
6028       Result = NextVF;
6029 
6030   if (Result != VectorizationFactor::Disabled())
6031     LLVM_DEBUG(dbgs() << "LEV: Vectorizing epilogue loop with VF = "
6032                       << Result.Width.getFixedValue() << "\n";);
6033   return Result;
6034 }
6035 
6036 std::pair<unsigned, unsigned>
6037 LoopVectorizationCostModel::getSmallestAndWidestTypes() {
6038   unsigned MinWidth = -1U;
6039   unsigned MaxWidth = 8;
6040   const DataLayout &DL = TheFunction->getParent()->getDataLayout();
6041   for (Type *T : ElementTypesInLoop) {
6042     MinWidth = std::min<unsigned>(
6043         MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6044     MaxWidth = std::max<unsigned>(
6045         MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
6046   }
6047   return {MinWidth, MaxWidth};
6048 }
6049 
6050 void LoopVectorizationCostModel::collectElementTypesForWidening() {
6051   ElementTypesInLoop.clear();
6052   // For each block.
6053   for (BasicBlock *BB : TheLoop->blocks()) {
6054     // For each instruction in the loop.
6055     for (Instruction &I : BB->instructionsWithoutDebug()) {
6056       Type *T = I.getType();
6057 
6058       // Skip ignored values.
6059       if (ValuesToIgnore.count(&I))
6060         continue;
6061 
6062       // Only examine Loads, Stores and PHINodes.
6063       if (!isa<LoadInst>(I) && !isa<StoreInst>(I) && !isa<PHINode>(I))
6064         continue;
6065 
6066       // Examine PHI nodes that are reduction variables. Update the type to
6067       // account for the recurrence type.
6068       if (auto *PN = dyn_cast<PHINode>(&I)) {
6069         if (!Legal->isReductionVariable(PN))
6070           continue;
6071         const RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[PN];
6072         if (PreferInLoopReductions || useOrderedReductions(RdxDesc) ||
6073             TTI.preferInLoopReduction(RdxDesc.getOpcode(),
6074                                       RdxDesc.getRecurrenceType(),
6075                                       TargetTransformInfo::ReductionFlags()))
6076           continue;
6077         T = RdxDesc.getRecurrenceType();
6078       }
6079 
6080       // Examine the stored values.
6081       if (auto *ST = dyn_cast<StoreInst>(&I))
6082         T = ST->getValueOperand()->getType();
6083 
6084       // Ignore loaded pointer types and stored pointer types that are not
6085       // vectorizable.
6086       //
6087       // FIXME: The check here attempts to predict whether a load or store will
6088       //        be vectorized. We only know this for certain after a VF has
6089       //        been selected. Here, we assume that if an access can be
6090       //        vectorized, it will be. We should also look at extending this
6091       //        optimization to non-pointer types.
6092       //
6093       if (T->isPointerTy() && !isConsecutiveLoadOrStore(&I) &&
6094           !isAccessInterleaved(&I) && !isLegalGatherOrScatter(&I))
6095         continue;
6096 
6097       ElementTypesInLoop.insert(T);
6098     }
6099   }
6100 }
6101 
6102 unsigned LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
6103                                                            unsigned LoopCost) {
6104   // -- The interleave heuristics --
6105   // We interleave the loop in order to expose ILP and reduce the loop overhead.
6106   // There are many micro-architectural considerations that we can't predict
6107   // at this level. For example, frontend pressure (on decode or fetch) due to
6108   // code size, or the number and capabilities of the execution ports.
6109   //
6110   // We use the following heuristics to select the interleave count:
6111   // 1. If the code has reductions, then we interleave to break the cross
6112   // iteration dependency.
6113   // 2. If the loop is really small, then we interleave to reduce the loop
6114   // overhead.
6115   // 3. We don't interleave if we think that we will spill registers to memory
6116   // due to the increased register pressure.
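       // Illustrative example (not from the source): for a scalar sum reduction
       //   for (i) { s += a[i]; }
       // interleaving by 2 keeps two independent partial sums that are combined
       // after the loop, breaking the single serial dependence chain.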
6117 
6118   if (!isScalarEpilogueAllowed())
6119     return 1;
6120 
6121   // The max safe dependence distance was already used to limit the vectorization factor; interleaving further could violate it.
6122   if (Legal->getMaxSafeDepDistBytes() != -1U)
6123     return 1;
6124 
6125   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
6126   const bool HasReductions = !Legal->getReductionVars().empty();
6127   // Do not interleave loops with a relatively small known or estimated trip
6128   // count. But we will interleave when InterleaveSmallLoopScalarReduction is
6129   // enabled, and the code has scalar reductions (HasReductions && VF == 1),
6130   // because under those conditions interleaving can expose ILP and break
6131   // cross-iteration dependences for reductions.
6132   if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
6133       !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
6134     return 1;
6135 
6136   RegisterUsage R = calculateRegisterUsage({VF})[0];
6137   // We divide by these constants, so we assume that we have at least one
6138   // instruction that uses at least one register.
6139   for (auto& pair : R.MaxLocalUsers) {
6140     pair.second = std::max(pair.second, 1U);
6141   }
6142 
6143   // We calculate the interleave count using the following formula.
6144   // Subtract the number of loop invariants from the number of available
6145   // registers. These registers are used by all of the interleaved instances.
6146   // Next, divide the remaining registers by the number of registers that is
6147   // required by the loop, in order to estimate how many parallel instances
6148   // fit without causing spills. All of this is rounded down if necessary to be
6149   // a power of two. We want power of two interleave count to simplify any
6150   // addressing operations or alignment considerations.
6151   // We also want power of two interleave counts to ensure that the induction
6152   // variable of the vector loop wraps to zero, when tail is folded by masking;
6153   // this currently happens when OptForSize, in which case IC is set to 1 above.
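       // Illustrative example (hypothetical numbers): with 32 registers in a
       // class, 2 loop-invariant values and 10 live values per instance, the
       // count would be PowerOf2Floor((32 - 2) / 10) = 2; with the induction
       // variable heuristic it is PowerOf2Floor((32 - 2 - 1) / (10 - 1)) = 2.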
6154   unsigned IC = UINT_MAX;
6155 
6156   for (auto& pair : R.MaxLocalUsers) {
6157     unsigned TargetNumRegisters = TTI.getNumberOfRegisters(pair.first);
6158     LLVM_DEBUG(dbgs() << "LV: The target has " << TargetNumRegisters
6159                       << " registers of "
6160                       << TTI.getRegisterClassName(pair.first) << " register class\n");
6161     if (VF.isScalar()) {
6162       if (ForceTargetNumScalarRegs.getNumOccurrences() > 0)
6163         TargetNumRegisters = ForceTargetNumScalarRegs;
6164     } else {
6165       if (ForceTargetNumVectorRegs.getNumOccurrences() > 0)
6166         TargetNumRegisters = ForceTargetNumVectorRegs;
6167     }
6168     unsigned MaxLocalUsers = pair.second;
6169     unsigned LoopInvariantRegs = 0;
6170     if (R.LoopInvariantRegs.find(pair.first) != R.LoopInvariantRegs.end())
6171       LoopInvariantRegs = R.LoopInvariantRegs[pair.first];
6172 
6173     unsigned TmpIC = PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs) / MaxLocalUsers);
6174     // Don't count the induction variable as interleaved.
6175     if (EnableIndVarRegisterHeur) {
6176       TmpIC =
6177           PowerOf2Floor((TargetNumRegisters - LoopInvariantRegs - 1) /
6178                         std::max(1U, (MaxLocalUsers - 1)));
6179     }
6180 
6181     IC = std::min(IC, TmpIC);
6182   }
6183 
6184   // Clamp the interleave ranges to reasonable counts.
6185   unsigned MaxInterleaveCount =
6186       TTI.getMaxInterleaveFactor(VF.getKnownMinValue());
6187 
6188   // Check if the user has overridden the max.
6189   if (VF.isScalar()) {
6190     if (ForceTargetMaxScalarInterleaveFactor.getNumOccurrences() > 0)
6191       MaxInterleaveCount = ForceTargetMaxScalarInterleaveFactor;
6192   } else {
6193     if (ForceTargetMaxVectorInterleaveFactor.getNumOccurrences() > 0)
6194       MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
6195   }
6196 
6197   // If the trip count is a known or estimated compile-time constant, limit the
6198   // interleave count to be at most the trip count divided by VF, provided it
6199   // is at least 1.
6200   //
6201   // For scalable vectors we can't know if interleaving is beneficial. It may
6202   // not be beneficial for small loops if none of the lanes in the second vector
6203   // iteration is enabled. However, for larger loops, there is likely to be a
6204   // similar benefit as for fixed-width vectors. For now, we choose to leave
6205   // the InterleaveCount as if vscale is '1', although if some information about
6206   // the vector is known (e.g. min vector size), we can make a better decision.
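       // Illustrative example (hypothetical numbers): with BestKnownTC = 20 and
       // VF = 8, the interleave count is capped at 20 / 8 = 2.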
6207   if (BestKnownTC) {
6208     MaxInterleaveCount =
6209         std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
6210     // Make sure MaxInterleaveCount is greater than 0.
6211     MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
6212   }
6213 
6214   assert(MaxInterleaveCount > 0 &&
6215          "Maximum interleave count must be greater than 0");
6216 
6217   // Clamp the calculated IC to be between 1 and the max interleave count
6218   // that the target and trip count allow.
6219   if (IC > MaxInterleaveCount)
6220     IC = MaxInterleaveCount;
6221   else
6222     // Make sure IC is greater than 0.
6223     IC = std::max(1u, IC);
6224 
6225   assert(IC > 0 && "Interleave count must be greater than 0.");
6226 
6227   // If we did not calculate the cost for VF (because the user selected the VF)
6228   // then we calculate the cost of VF here.
6229   if (LoopCost == 0) {
6230     InstructionCost C = expectedCost(VF).first;
6231     assert(C.isValid() && "Expected to have chosen a VF with valid cost");
6232     LoopCost = *C.getValue();
6233   }
6234 
6235   assert(LoopCost && "Non-zero loop cost expected");
6236 
6237   // Interleave if we vectorized this loop and there is a reduction that could
6238   // benefit from interleaving.
6239   if (VF.isVector() && HasReductions) {
6240     LLVM_DEBUG(dbgs() << "LV: Interleaving because of reductions.\n");
6241     return IC;
6242   }
6243 
6244   // Note that if we've already vectorized the loop we will have done the
6245   // runtime check and so interleaving won't require further checks.
6246   bool InterleavingRequiresRuntimePointerCheck =
6247       (VF.isScalar() && Legal->getRuntimePointerChecking()->Need);
6248 
6249   // We want to interleave small loops in order to reduce the loop overhead and
6250   // potentially expose ILP opportunities.
6251   LLVM_DEBUG(dbgs() << "LV: Loop cost is " << LoopCost << '\n'
6252                     << "LV: IC is " << IC << '\n'
6253                     << "LV: VF is " << VF << '\n');
6254   const bool AggressivelyInterleaveReductions =
6255       TTI.enableAggressiveInterleaving(HasReductions);
6256   if (!InterleavingRequiresRuntimePointerCheck && LoopCost < SmallLoopCost) {
6257     // We assume that the cost overhead is 1 and we use the cost model
6258     // to estimate the cost of the loop and interleave until the cost of the
6259     // loop overhead is about 5% of the cost of the loop.
6260     unsigned SmallIC =
6261         std::min(IC, (unsigned)PowerOf2Floor(SmallLoopCost / LoopCost));
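         // Illustrative example (hypothetical numbers): if SmallLoopCost is 20
         // (a typical default) and LoopCost is 5, then
         // SmallIC = min(IC, PowerOf2Floor(20 / 5)) = min(IC, 4).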
6262 
6263     // Interleave until store/load ports (estimated by max interleave count) are
6264     // saturated.
6265     unsigned NumStores = Legal->getNumStores();
6266     unsigned NumLoads = Legal->getNumLoads();
6267     unsigned StoresIC = IC / (NumStores ? NumStores : 1);
6268     unsigned LoadsIC = IC / (NumLoads ? NumLoads : 1);
6269 
6270     // There is little point in interleaving for reductions containing selects
6271     // and compares when VF=1 since it may just create more overhead than it's
6272     // worth for loops with small trip counts. This is because we still have to
6273     // do the final reduction after the loop.
6274     bool HasSelectCmpReductions =
6275         HasReductions &&
6276         any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6277           const RecurrenceDescriptor &RdxDesc = Reduction.second;
6278           return RecurrenceDescriptor::isSelectCmpRecurrenceKind(
6279               RdxDesc.getRecurrenceKind());
6280         });
6281     if (HasSelectCmpReductions) {
6282       LLVM_DEBUG(dbgs() << "LV: Not interleaving select-cmp reductions.\n");
6283       return 1;
6284     }
6285 
6286     // If we have a scalar reduction (vector reductions are already dealt with
6287     // by this point), we can increase the critical path length if the loop
6288     // we're interleaving is inside another loop. For tree-wise reductions
6289     // set the limit to 2, and for ordered reductions it's best to disable
6290     // interleaving entirely.
6291     if (HasReductions && TheLoop->getLoopDepth() > 1) {
6292       bool HasOrderedReductions =
6293           any_of(Legal->getReductionVars(), [&](auto &Reduction) -> bool {
6294             const RecurrenceDescriptor &RdxDesc = Reduction.second;
6295             return RdxDesc.isOrdered();
6296           });
6297       if (HasOrderedReductions) {
6298         LLVM_DEBUG(
6299             dbgs() << "LV: Not interleaving scalar ordered reductions.\n");
6300         return 1;
6301       }
6302 
6303       unsigned F = static_cast<unsigned>(MaxNestedScalarReductionIC);
6304       SmallIC = std::min(SmallIC, F);
6305       StoresIC = std::min(StoresIC, F);
6306       LoadsIC = std::min(LoadsIC, F);
6307     }
6308 
6309     if (EnableLoadStoreRuntimeInterleave &&
6310         std::max(StoresIC, LoadsIC) > SmallIC) {
6311       LLVM_DEBUG(
6312           dbgs() << "LV: Interleaving to saturate store or load ports.\n");
6313       return std::max(StoresIC, LoadsIC);
6314     }
6315 
6316     // If there are scalar reductions and TTI has enabled aggressive
6317     // interleaving for reductions, we will interleave to expose ILP.
6318     if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
6319         AggressivelyInterleaveReductions) {
6320       LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6321       // Interleave no less than SmallIC but not as aggressively as the normal IC
6322       // to handle the rare situation when resources are too limited.
6323       return std::max(IC / 2, SmallIC);
6324     } else {
6325       LLVM_DEBUG(dbgs() << "LV: Interleaving to reduce branch cost.\n");
6326       return SmallIC;
6327     }
6328   }
6329 
6330   // Interleave if this is a large loop (small loops are already dealt with by
6331   // this point) that could benefit from interleaving.
6332   if (AggressivelyInterleaveReductions) {
6333     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
6334     return IC;
6335   }
6336 
6337   LLVM_DEBUG(dbgs() << "LV: Not Interleaving.\n");
6338   return 1;
6339 }
6340 
6341 SmallVector<LoopVectorizationCostModel::RegisterUsage, 8>
6342 LoopVectorizationCostModel::calculateRegisterUsage(ArrayRef<ElementCount> VFs) {
6343   // This function calculates the register usage by measuring the highest number
6344   // of values that are alive at a single location. Obviously, this is a very
6345   // rough estimation. We scan the loop in a topological order and
6346   // assign a number to each instruction. We use RPO to ensure that defs are
6347   // met before their users. We assume that each instruction that has in-loop
6348   // users starts an interval. We record every time that an in-loop value is
6349   // used, so we have a list of the first and last occurrences of each
6350   // instruction. Next, we transpose this data structure into a multi map that
6351   // holds the list of intervals that *end* at a specific location. This multi
6352   // map allows us to perform a linear search. We scan the instructions linearly
6353   // and record each time that a new interval starts, by placing it in a set.
6354   // If we find this value in the multi-map then we remove it from the set.
6355   // The max register usage is the maximum size of the set.
6356   // We also search for instructions that are defined outside the loop, but are
6357   // used inside the loop. We need this number separately from the max-interval
6358   // usage number because when we unroll, loop-invariant values do not take
6359   // more registers.
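       // Illustrative sketch (hypothetical values): if %a is defined early in the
       // block and %b a few instructions later, and both are last used by the same
       // instruction near the end of the block, then both intervals are open at
       // that point and the usage estimate for their register class is 2.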
6360   LoopBlocksDFS DFS(TheLoop);
6361   DFS.perform(LI);
6362 
6363   RegisterUsage RU;
6364 
6365   // Each 'key' in the map opens a new interval. The values
6366   // of the map are the index of the 'last seen' usage of the
6367   // instruction that is the key.
6368   using IntervalMap = DenseMap<Instruction *, unsigned>;
6369 
6370   // Maps instruction to its index.
6371   SmallVector<Instruction *, 64> IdxToInstr;
6372   // Marks the end of each interval.
6373   IntervalMap EndPoint;
6374   // Saves the set of instructions that are used inside the loop.
6375   SmallPtrSet<Instruction *, 8> Ends;
6376   // Saves the list of values that are used in the loop but are
6377   // defined outside the loop, such as arguments and constants.
6378   SmallPtrSet<Value *, 8> LoopInvariants;
6379 
6380   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
6381     for (Instruction &I : BB->instructionsWithoutDebug()) {
6382       IdxToInstr.push_back(&I);
6383 
6384       // Save the end location of each USE.
6385       for (Value *U : I.operands()) {
6386         auto *Instr = dyn_cast<Instruction>(U);
6387 
6388         // Ignore non-instruction values such as arguments, constants, etc.
6389         if (!Instr)
6390           continue;
6391 
6392         // If this instruction is outside the loop then record it and continue.
6393         if (!TheLoop->contains(Instr)) {
6394           LoopInvariants.insert(Instr);
6395           continue;
6396         }
6397 
6398         // Overwrite previous end points.
6399         EndPoint[Instr] = IdxToInstr.size();
6400         Ends.insert(Instr);
6401       }
6402     }
6403   }
6404 
6405   // Saves the list of intervals that end with the index in 'key'.
6406   using InstrList = SmallVector<Instruction *, 2>;
6407   DenseMap<unsigned, InstrList> TransposeEnds;
6408 
6409   // Transpose the EndPoints to a list of values that end at each index.
6410   for (auto &Interval : EndPoint)
6411     TransposeEnds[Interval.second].push_back(Interval.first);
6412 
6413   SmallPtrSet<Instruction *, 8> OpenIntervals;
6414   SmallVector<RegisterUsage, 8> RUs(VFs.size());
6415   SmallVector<SmallMapVector<unsigned, unsigned, 4>, 8> MaxUsages(VFs.size());
6416 
6417   LLVM_DEBUG(dbgs() << "LV(REG): Calculating max register usage:\n");
6418 
6419   // A lambda that gets the register usage for the given type and VF.
6420   const auto &TTICapture = TTI;
6421   auto GetRegUsage = [&TTICapture](Type *Ty, ElementCount VF) -> unsigned {
6422     if (Ty->isTokenTy() || !VectorType::isValidElementType(Ty))
6423       return 0;
6424     InstructionCost::CostType RegUsage =
6425         *TTICapture.getRegUsageForType(VectorType::get(Ty, VF)).getValue();
6426     assert(RegUsage >= 0 && RegUsage <= std::numeric_limits<unsigned>::max() &&
6427            "Nonsensical values for register usage.");
6428     return RegUsage;
6429   };
6430 
6431   for (unsigned int i = 0, s = IdxToInstr.size(); i < s; ++i) {
6432     Instruction *I = IdxToInstr[i];
6433 
6434     // Remove all of the instructions that end at this location.
6435     InstrList &List = TransposeEnds[i];
6436     for (Instruction *ToRemove : List)
6437       OpenIntervals.erase(ToRemove);
6438 
6439     // Ignore instructions that are never used within the loop.
6440     if (!Ends.count(I))
6441       continue;
6442 
6443     // Skip ignored values.
6444     if (ValuesToIgnore.count(I))
6445       continue;
6446 
6447     // For each VF find the maximum usage of registers.
6448     for (unsigned j = 0, e = VFs.size(); j < e; ++j) {
6449       // Count the number of live intervals.
6450       SmallMapVector<unsigned, unsigned, 4> RegUsage;
6451 
6452       if (VFs[j].isScalar()) {
6453         for (auto Inst : OpenIntervals) {
6454           unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6455           if (RegUsage.find(ClassID) == RegUsage.end())
6456             RegUsage[ClassID] = 1;
6457           else
6458             RegUsage[ClassID] += 1;
6459         }
6460       } else {
6461         collectUniformsAndScalars(VFs[j]);
6462         for (auto Inst : OpenIntervals) {
6463           // Skip ignored values for VF > 1.
6464           if (VecValuesToIgnore.count(Inst))
6465             continue;
6466           if (isScalarAfterVectorization(Inst, VFs[j])) {
6467             unsigned ClassID = TTI.getRegisterClassForType(false, Inst->getType());
6468             if (RegUsage.find(ClassID) == RegUsage.end())
6469               RegUsage[ClassID] = 1;
6470             else
6471               RegUsage[ClassID] += 1;
6472           } else {
6473             unsigned ClassID = TTI.getRegisterClassForType(true, Inst->getType());
6474             if (RegUsage.find(ClassID) == RegUsage.end())
6475               RegUsage[ClassID] = GetRegUsage(Inst->getType(), VFs[j]);
6476             else
6477               RegUsage[ClassID] += GetRegUsage(Inst->getType(), VFs[j]);
6478           }
6479         }
6480       }
6481 
6482       for (auto& pair : RegUsage) {
6483         if (MaxUsages[j].find(pair.first) != MaxUsages[j].end())
6484           MaxUsages[j][pair.first] = std::max(MaxUsages[j][pair.first], pair.second);
6485         else
6486           MaxUsages[j][pair.first] = pair.second;
6487       }
6488     }
6489 
6490     LLVM_DEBUG(dbgs() << "LV(REG): At #" << i << " Interval # "
6491                       << OpenIntervals.size() << '\n');
6492 
6493     // Add the current instruction to the list of open intervals.
6494     OpenIntervals.insert(I);
6495   }
6496 
6497   for (unsigned i = 0, e = VFs.size(); i < e; ++i) {
6498     SmallMapVector<unsigned, unsigned, 4> Invariant;
6499 
6500     for (auto Inst : LoopInvariants) {
6501       unsigned Usage =
6502           VFs[i].isScalar() ? 1 : GetRegUsage(Inst->getType(), VFs[i]);
6503       unsigned ClassID =
6504           TTI.getRegisterClassForType(VFs[i].isVector(), Inst->getType());
6505       if (Invariant.find(ClassID) == Invariant.end())
6506         Invariant[ClassID] = Usage;
6507       else
6508         Invariant[ClassID] += Usage;
6509     }
6510 
6511     LLVM_DEBUG({
6512       dbgs() << "LV(REG): VF = " << VFs[i] << '\n';
6513       dbgs() << "LV(REG): Found max usage: " << MaxUsages[i].size()
6514              << " item\n";
6515       for (const auto &pair : MaxUsages[i]) {
6516         dbgs() << "LV(REG): RegisterClass: "
6517                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6518                << " registers\n";
6519       }
6520       dbgs() << "LV(REG): Found invariant usage: " << Invariant.size()
6521              << " item\n";
6522       for (const auto &pair : Invariant) {
6523         dbgs() << "LV(REG): RegisterClass: "
6524                << TTI.getRegisterClassName(pair.first) << ", " << pair.second
6525                << " registers\n";
6526       }
6527     });
6528 
6529     RU.LoopInvariantRegs = Invariant;
6530     RU.MaxLocalUsers = MaxUsages[i];
6531     RUs[i] = RU;
6532   }
6533 
6534   return RUs;
6535 }
6536 
6537 bool LoopVectorizationCostModel::useEmulatedMaskMemRefHack(Instruction *I){
6538   // TODO: Cost model for emulated masked load/store is completely
6539   // broken. This hack guides the cost model to use an artificially
6540   // high enough value to practically disable vectorization with such
6541   // operations, except where previously deployed legality hack allowed
6542   // using very low cost values. This is to avoid regressions coming simply
6543   // from moving "masked load/store" check from legality to cost model.
6544   // Masked Load/Gather emulation was previously never allowed.
6545   // Limited number of Masked Store/Scatter emulation was allowed.
6546   assert(isPredicatedInst(I) &&
6547          "Expecting a scalar emulated instruction");
6548   return isa<LoadInst>(I) ||
6549          (isa<StoreInst>(I) &&
6550           NumPredStores > NumberOfStoresToPredicate);
6551 }
6552 
6553 void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
6554   // If we aren't vectorizing the loop, or if we've already collected the
6555   // instructions to scalarize, there's nothing to do. Collection may already
6556   // have occurred if we have a user-selected VF and are now computing the
6557   // expected cost for interleaving.
6558   if (VF.isScalar() || VF.isZero() ||
6559       InstsToScalarize.find(VF) != InstsToScalarize.end())
6560     return;
6561 
6562   // Initialize a mapping for VF in InstsToScalarize. If we find that it's
6563   // not profitable to scalarize any instructions, the presence of VF in the
6564   // map will indicate that we've analyzed it already.
6565   ScalarCostsTy &ScalarCostsVF = InstsToScalarize[VF];
6566 
6567   // Find all the instructions that are scalar with predication in the loop and
6568   // determine if it would be better to not if-convert the blocks they are in.
6569   // If so, we also record the instructions to scalarize.
6570   for (BasicBlock *BB : TheLoop->blocks()) {
6571     if (!blockNeedsPredicationForAnyReason(BB))
6572       continue;
6573     for (Instruction &I : *BB)
6574       if (isScalarWithPredication(&I)) {
6575         ScalarCostsTy ScalarCosts;
6576         // Do not apply discount if scalable, because that would lead to
6577         // invalid scalarization costs.
6578         // Do not apply discount logic if hacked cost is needed
6579         // for emulated masked memrefs.
6580         if (!VF.isScalable() && !useEmulatedMaskMemRefHack(&I) &&
6581             computePredInstDiscount(&I, ScalarCosts, VF) >= 0)
6582           ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
6583         // Remember that BB will remain after vectorization.
6584         PredicatedBBsAfterVectorization.insert(BB);
6585       }
6586   }
6587 }
6588 
6589 int LoopVectorizationCostModel::computePredInstDiscount(
6590     Instruction *PredInst, ScalarCostsTy &ScalarCosts, ElementCount VF) {
6591   assert(!isUniformAfterVectorization(PredInst, VF) &&
6592          "Instruction marked uniform-after-vectorization will be predicated");
6593 
6594   // Initialize the discount to zero, meaning that the scalar version and the
6595   // vector version cost the same.
6596   InstructionCost Discount = 0;
6597 
6598   // Holds instructions to analyze. The instructions we visit are mapped in
6599   // ScalarCosts. Those instructions are the ones that would be scalarized if
6600   // we find that the scalar version costs less.
6601   SmallVector<Instruction *, 8> Worklist;
6602 
6603   // Returns true if the given instruction can be scalarized.
6604   auto canBeScalarized = [&](Instruction *I) -> bool {
6605     // We only attempt to scalarize instructions forming a single-use chain
6606     // from the original predicated block that would otherwise be vectorized.
6607     // Although not strictly necessary, we give up on instructions we know will
6608     // already be scalar to avoid traversing chains that are unlikely to be
6609     // beneficial.
6610     if (!I->hasOneUse() || PredInst->getParent() != I->getParent() ||
6611         isScalarAfterVectorization(I, VF))
6612       return false;
6613 
6614     // If the instruction is scalar with predication, it will be analyzed
6615     // separately. We ignore it within the context of PredInst.
6616     if (isScalarWithPredication(I))
6617       return false;
6618 
6619     // If any of the instruction's operands are uniform after vectorization,
6620     // the instruction cannot be scalarized. This prevents, for example, a
6621     // masked load from being scalarized.
6622     //
6623     // We assume we will only emit a value for lane zero of an instruction
6624     // marked uniform after vectorization, rather than VF identical values.
6625     // Thus, if we scalarize an instruction that uses a uniform, we would
6626     // create uses of values corresponding to the lanes we aren't emitting code
6627     // for. This behavior can be changed by allowing getScalarValue to clone
6628     // the lane zero values for uniforms rather than asserting.
6629     for (Use &U : I->operands())
6630       if (auto *J = dyn_cast<Instruction>(U.get()))
6631         if (isUniformAfterVectorization(J, VF))
6632           return false;
6633 
6634     // Otherwise, we can scalarize the instruction.
6635     return true;
6636   };
6637 
6638   // Compute the expected cost discount from scalarizing the entire expression
6639   // feeding the predicated instruction. We currently only consider expressions
6640   // that are single-use instruction chains.
6641   Worklist.push_back(PredInst);
6642   while (!Worklist.empty()) {
6643     Instruction *I = Worklist.pop_back_val();
6644 
6645     // If we've already analyzed the instruction, there's nothing to do.
6646     if (ScalarCosts.find(I) != ScalarCosts.end())
6647       continue;
6648 
6649     // Compute the cost of the vector instruction. Note that this cost already
6650     // includes the scalarization overhead of the predicated instruction.
6651     InstructionCost VectorCost = getInstructionCost(I, VF).first;
6652 
6653     // Compute the cost of the scalarized instruction. This cost is the cost of
6654     // the instruction as if it wasn't if-converted and instead remained in the
6655     // predicated block. We will scale this cost by block probability after
6656     // computing the scalarization overhead.
6657     InstructionCost ScalarCost =
6658         VF.getFixedValue() *
6659         getInstructionCost(I, ElementCount::getFixed(1)).first;
6660 
6661     // Compute the scalarization overhead of needed insertelement instructions
6662     // and phi nodes.
6663     if (isScalarWithPredication(I) && !I->getType()->isVoidTy()) {
6664       ScalarCost += TTI.getScalarizationOverhead(
6665           cast<VectorType>(ToVectorTy(I->getType(), VF)),
6666           APInt::getAllOnes(VF.getFixedValue()), true, false);
6667       ScalarCost +=
6668           VF.getFixedValue() *
6669           TTI.getCFInstrCost(Instruction::PHI, TTI::TCK_RecipThroughput);
6670     }
6671 
6672     // Compute the scalarization overhead of needed extractelement
6673     // instructions. For each of the instruction's operands, if the operand can
6674     // be scalarized, add it to the worklist; otherwise, account for the
6675     // overhead.
6676     for (Use &U : I->operands())
6677       if (auto *J = dyn_cast<Instruction>(U.get())) {
6678         assert(VectorType::isValidElementType(J->getType()) &&
6679                "Instruction has non-scalar type");
6680         if (canBeScalarized(J))
6681           Worklist.push_back(J);
6682         else if (needsExtract(J, VF)) {
6683           ScalarCost += TTI.getScalarizationOverhead(
6684               cast<VectorType>(ToVectorTy(J->getType(), VF)),
6685               APInt::getAllOnes(VF.getFixedValue()), false, true);
6686         }
6687       }
6688 
6689     // Scale the total scalar cost by block probability.
6690     ScalarCost /= getReciprocalPredBlockProb();
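         // Illustrative example (hypothetical costs): with VectorCost = 8, an
         // unscaled ScalarCost of 12 and an assumed 50% block-execution
         // probability, ScalarCost becomes 6 and the discount grows by 8 - 6 = 2,
         // i.e. scalarizing this chain looks 2 units cheaper than vectorizing it.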
6691 
6692     // Compute the discount. A non-negative discount means the vector version
6693     // of the instruction costs more, and scalarizing would be beneficial.
6694     Discount += VectorCost - ScalarCost;
6695     ScalarCosts[I] = ScalarCost;
6696   }
6697 
6698   return *Discount.getValue();
6699 }
6700 
6701 LoopVectorizationCostModel::VectorizationCostTy
6702 LoopVectorizationCostModel::expectedCost(
6703     ElementCount VF, SmallVectorImpl<InstructionVFPair> *Invalid) {
6704   VectorizationCostTy Cost;
6705 
6706   // For each block.
6707   for (BasicBlock *BB : TheLoop->blocks()) {
6708     VectorizationCostTy BlockCost;
6709 
6710     // For each instruction in the old loop.
6711     for (Instruction &I : BB->instructionsWithoutDebug()) {
6712       // Skip ignored values.
6713       if (ValuesToIgnore.count(&I) ||
6714           (VF.isVector() && VecValuesToIgnore.count(&I)))
6715         continue;
6716 
6717       VectorizationCostTy C = getInstructionCost(&I, VF);
6718 
6719       // Check if we should override the cost.
6720       if (C.first.isValid() &&
6721           ForceTargetInstructionCost.getNumOccurrences() > 0)
6722         C.first = InstructionCost(ForceTargetInstructionCost);
6723 
6724       // Keep a list of instructions with invalid costs.
6725       if (Invalid && !C.first.isValid())
6726         Invalid->emplace_back(&I, VF);
6727 
6728       BlockCost.first += C.first;
6729       BlockCost.second |= C.second;
6730       LLVM_DEBUG(dbgs() << "LV: Found an estimated cost of " << C.first
6731                         << " for VF " << VF << " For instruction: " << I
6732                         << '\n');
6733     }
6734 
6735     // If we are vectorizing a predicated block, it will have been
6736     // if-converted. This means that the block's instructions (aside from
6737     // stores and instructions that may divide by zero) will now be
6738     // unconditionally executed. For the scalar case, we may not always execute
6739     // the predicated block, if it is an if-else block. Thus, scale the block's
6740     // cost by the probability of executing it. blockNeedsPredication from
6741     // Legal is used so as to not include all blocks in tail folded loops.
6742     if (VF.isScalar() && Legal->blockNeedsPredication(BB))
6743       BlockCost.first /= getReciprocalPredBlockProb();
6744 
6745     Cost.first += BlockCost.first;
6746     Cost.second |= BlockCost.second;
6747   }
6748 
6749   return Cost;
6750 }
6751 
6752 /// Gets Address Access SCEV after verifying that the access pattern
6753 /// is loop invariant except for the induction variable dependence.
6754 ///
6755 /// This SCEV can be sent to the Target in order to estimate the address
6756 /// calculation cost.
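     /// For example (illustrative IR, not from the source):
     ///   %gep = getelementptr i32, i32* %base, i64 %iv
     /// where %base is loop invariant and %iv is an induction variable.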
6757 static const SCEV *getAddressAccessSCEV(
6758               Value *Ptr,
6759               LoopVectorizationLegality *Legal,
6760               PredicatedScalarEvolution &PSE,
6761               const Loop *TheLoop) {
6762 
6763   auto *Gep = dyn_cast<GetElementPtrInst>(Ptr);
6764   if (!Gep)
6765     return nullptr;
6766 
6767   // We are looking for a gep with all loop invariant indices except for one
6768   // which should be an induction variable.
6769   auto SE = PSE.getSE();
6770   unsigned NumOperands = Gep->getNumOperands();
6771   for (unsigned i = 1; i < NumOperands; ++i) {
6772     Value *Opd = Gep->getOperand(i);
6773     if (!SE->isLoopInvariant(SE->getSCEV(Opd), TheLoop) &&
6774         !Legal->isInductionVariable(Opd))
6775       return nullptr;
6776   }
6777 
6778   // Now we know we have a GEP ptr, %inv, %ind, %inv. return the Ptr SCEV.
6779   return PSE.getSCEV(Ptr);
6780 }
6781 
6782 static bool isStrideMul(Instruction *I, LoopVectorizationLegality *Legal) {
6783   return Legal->hasStride(I->getOperand(0)) ||
6784          Legal->hasStride(I->getOperand(1));
6785 }
6786 
6787 InstructionCost
6788 LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
6789                                                         ElementCount VF) {
6790   assert(VF.isVector() &&
6791          "Scalarization cost of instruction implies vectorization.");
6792   if (VF.isScalable())
6793     return InstructionCost::getInvalid();
6794 
6795   Type *ValTy = getLoadStoreType(I);
6796   auto SE = PSE.getSE();
6797 
6798   unsigned AS = getLoadStoreAddressSpace(I);
6799   Value *Ptr = getLoadStorePointerOperand(I);
6800   Type *PtrTy = ToVectorTy(Ptr->getType(), VF);
6801   // NOTE: PtrTy is a vector to signal `TTI::getAddressComputationCost`
6802   //       that it is being called from this specific place.
6803 
6804   // Figure out whether the access is strided and get the stride value
6805   // if it's known at compile time.
6806   const SCEV *PtrSCEV = getAddressAccessSCEV(Ptr, Legal, PSE, TheLoop);
6807 
6808   // Get the cost of the scalar memory instruction and address computation.
6809   InstructionCost Cost =
6810       VF.getKnownMinValue() * TTI.getAddressComputationCost(PtrTy, SE, PtrSCEV);
6811 
6812   // Don't pass *I here, since it is scalar but will actually be part of a
6813   // vectorized loop where the user of it is a vectorized instruction.
6814   const Align Alignment = getLoadStoreAlignment(I);
6815   Cost += VF.getKnownMinValue() *
6816           TTI.getMemoryOpCost(I->getOpcode(), ValTy->getScalarType(), Alignment,
6817                               AS, TTI::TCK_RecipThroughput);
6818 
6819   // Get the overhead of the extractelement and insertelement instructions
6820   // we might create due to scalarization.
6821   Cost += getScalarizationOverhead(I, VF);
6822 
6823   // If we have a predicated load/store, it will need extra i1 extracts and
6824   // conditional branches, but may not be executed for each vector lane. Scale
6825   // the cost by the probability of executing the predicated block.
6826   if (isPredicatedInst(I)) {
6827     Cost /= getReciprocalPredBlockProb();
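         // Illustrative example (hypothetical costs): assuming the usual 50%
         // block-execution estimate, a scalarized cost of 40 becomes 20 before
         // the i1 extract and branch costs below are added.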
6828 
6829     // Add the cost of an i1 extract and a branch
6830     auto *Vec_i1Ty =
6831         VectorType::get(IntegerType::getInt1Ty(ValTy->getContext()), VF);
6832     Cost += TTI.getScalarizationOverhead(
6833         Vec_i1Ty, APInt::getAllOnes(VF.getKnownMinValue()),
6834         /*Insert=*/false, /*Extract=*/true);
6835     Cost += TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
6836 
6837     if (useEmulatedMaskMemRefHack(I))
6838       // Artificially setting to a high enough value to practically disable
6839       // vectorization with such operations.
6840       Cost = 3000000;
6841   }
6842 
6843   return Cost;
6844 }
6845 
6846 InstructionCost
6847 LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
6848                                                     ElementCount VF) {
6849   Type *ValTy = getLoadStoreType(I);
6850   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6851   Value *Ptr = getLoadStorePointerOperand(I);
6852   unsigned AS = getLoadStoreAddressSpace(I);
6853   int ConsecutiveStride = Legal->isConsecutivePtr(ValTy, Ptr);
6854   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6855 
6856   assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
6857          "Stride should be 1 or -1 for consecutive memory access");
6858   const Align Alignment = getLoadStoreAlignment(I);
6859   InstructionCost Cost = 0;
6860   if (Legal->isMaskRequired(I))
6861     Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6862                                       CostKind);
6863   else
6864     Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
6865                                 CostKind, I);
6866 
6867   bool Reverse = ConsecutiveStride < 0;
6868   if (Reverse)
6869     Cost +=
6870         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6871   return Cost;
6872 }
6873 
6874 InstructionCost
6875 LoopVectorizationCostModel::getUniformMemOpCost(Instruction *I,
6876                                                 ElementCount VF) {
6877   assert(Legal->isUniformMemOp(*I));
6878 
6879   Type *ValTy = getLoadStoreType(I);
6880   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6881   const Align Alignment = getLoadStoreAlignment(I);
6882   unsigned AS = getLoadStoreAddressSpace(I);
6883   enum TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
6884   if (isa<LoadInst>(I)) {
6885     return TTI.getAddressComputationCost(ValTy) +
6886            TTI.getMemoryOpCost(Instruction::Load, ValTy, Alignment, AS,
6887                                CostKind) +
6888            TTI.getShuffleCost(TargetTransformInfo::SK_Broadcast, VectorTy);
6889   }
6890   StoreInst *SI = cast<StoreInst>(I);
6891 
6892   bool isLoopInvariantStoreValue = Legal->isUniform(SI->getValueOperand());
6893   return TTI.getAddressComputationCost(ValTy) +
6894          TTI.getMemoryOpCost(Instruction::Store, ValTy, Alignment, AS,
6895                              CostKind) +
6896          (isLoopInvariantStoreValue
6897               ? 0
6898               : TTI.getVectorInstrCost(Instruction::ExtractElement, VectorTy,
6899                                        VF.getKnownMinValue() - 1));
6900 }
6901 
6902 InstructionCost
6903 LoopVectorizationCostModel::getGatherScatterCost(Instruction *I,
6904                                                  ElementCount VF) {
6905   Type *ValTy = getLoadStoreType(I);
6906   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6907   const Align Alignment = getLoadStoreAlignment(I);
6908   const Value *Ptr = getLoadStorePointerOperand(I);
6909 
6910   return TTI.getAddressComputationCost(VectorTy) +
6911          TTI.getGatherScatterOpCost(
6912              I->getOpcode(), VectorTy, Ptr, Legal->isMaskRequired(I), Alignment,
6913              TargetTransformInfo::TCK_RecipThroughput, I);
6914 }
6915 
6916 InstructionCost
6917 LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
6918                                                    ElementCount VF) {
6919   // TODO: Once we have support for interleaving with scalable vectors
6920   // we can calculate the cost properly here.
6921   if (VF.isScalable())
6922     return InstructionCost::getInvalid();
6923 
6924   Type *ValTy = getLoadStoreType(I);
6925   auto *VectorTy = cast<VectorType>(ToVectorTy(ValTy, VF));
6926   unsigned AS = getLoadStoreAddressSpace(I);
6927 
6928   auto Group = getInterleavedAccessGroup(I);
6929   assert(Group && "Fail to get an interleaved access group.");
6930 
6931   unsigned InterleaveFactor = Group->getFactor();
6932   auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
6933 
6934   // Holds the indices of existing members in the interleaved group.
6935   SmallVector<unsigned, 4> Indices;
6936   for (unsigned IF = 0; IF < InterleaveFactor; IF++)
6937     if (Group->getMember(IF))
6938       Indices.push_back(IF);
6939 
6940   // Calculate the cost of the whole interleaved group.
6941   bool UseMaskForGaps =
6942       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
6943       (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
6944   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
6945       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
6946       AS, TTI::TCK_RecipThroughput, Legal->isMaskRequired(I), UseMaskForGaps);
6947 
6948   if (Group->isReverse()) {
6949     // TODO: Add support for reversed masked interleaved access.
6950     assert(!Legal->isMaskRequired(I) &&
6951            "Reverse masked interleaved access not supported.");
6952     Cost +=
6953         Group->getNumMembers() *
6954         TTI.getShuffleCost(TargetTransformInfo::SK_Reverse, VectorTy, None, 0);
6955   }
6956   return Cost;
6957 }
6958 
6959 Optional<InstructionCost> LoopVectorizationCostModel::getReductionPatternCost(
6960     Instruction *I, ElementCount VF, Type *Ty, TTI::TargetCostKind CostKind) {
6961   using namespace llvm::PatternMatch;
6962   // Early exit for no in-loop reductions
6963   if (InLoopReductionChains.empty() || VF.isScalar() || !isa<VectorType>(Ty))
6964     return None;
6965   auto *VectorTy = cast<VectorType>(Ty);
6966 
6967   // We are looking for one of the following patterns, taking the minimal acceptable cost:
6968   //  reduce(mul(ext(A), ext(B))) or
6969   //  reduce(mul(A, B)) or
6970   //  reduce(ext(A)) or
6971   //  reduce(A).
6972   // The basic idea is that we walk down the tree to do that, finding the root
6973   // reduction instruction in InLoopReductionImmediateChains. From there we find
6974   // the pattern of mul/ext and test the cost of the entire pattern vs the cost
6975   // of the components. If the reduction cost is lower then we return it for the
6976   // reduction instruction and 0 for the other instructions in the pattern. If
6977   // it is not, we return an invalid cost specifying the original cost method
6978   // should be used.
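       // Illustrative IR (hypothetical, not from the source):
       //   %a.ext = sext i8 %a to i32
       //   %b.ext = sext i8 %b to i32
       //   %mul   = mul i32 %a.ext, %b.ext
       //   %add   = add i32 %phi, %mul   ; in-loop add reduction
       // Starting from one of the exts, RetI walks to the mul and then to the
       // add, and the whole pattern may be costed as one extended multiply-add
       // reduction instead of summing the costs of its parts.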
6979   Instruction *RetI = I;
6980   if (match(RetI, m_ZExtOrSExt(m_Value()))) {
6981     if (!RetI->hasOneUser())
6982       return None;
6983     RetI = RetI->user_back();
6984   }
6985   if (match(RetI, m_Mul(m_Value(), m_Value())) &&
6986       RetI->user_back()->getOpcode() == Instruction::Add) {
6987     if (!RetI->hasOneUser())
6988       return None;
6989     RetI = RetI->user_back();
6990   }
6991 
6992   // Test if the found instruction is a reduction, and if not return an invalid
6993   // cost specifying the parent to use the original cost modelling.
6994   if (!InLoopReductionImmediateChains.count(RetI))
6995     return None;
6996 
6997   // Find the reduction this chain is a part of and calculate the basic cost of
6998   // the reduction on its own.
6999   Instruction *LastChain = InLoopReductionImmediateChains[RetI];
7000   Instruction *ReductionPhi = LastChain;
7001   while (!isa<PHINode>(ReductionPhi))
7002     ReductionPhi = InLoopReductionImmediateChains[ReductionPhi];
7003 
7004   const RecurrenceDescriptor &RdxDesc =
7005       Legal->getReductionVars()[cast<PHINode>(ReductionPhi)];
7006 
7007   InstructionCost BaseCost = TTI.getArithmeticReductionCost(
7008       RdxDesc.getOpcode(), VectorTy, RdxDesc.getFastMathFlags(), CostKind);
7009 
7010   // For a call to the llvm.fmuladd intrinsic we need to add the cost of a
7011   // normal fmul instruction to the cost of the fadd reduction.
7012   if (RdxDesc.getRecurrenceKind() == RecurKind::FMulAdd)
7013     BaseCost +=
7014         TTI.getArithmeticInstrCost(Instruction::FMul, VectorTy, CostKind);
7015 
7016   // If we're using ordered reductions then we can just return the base cost
7017   // here, since getArithmeticReductionCost calculates the full ordered
7018   // reduction cost when FP reassociation is not allowed.
7019   if (useOrderedReductions(RdxDesc))
7020     return BaseCost;
7021 
7022   // Get the operand that was not the reduction chain and match it to one of the
7023   // patterns, returning the better cost if it is found.
7024   Instruction *RedOp = RetI->getOperand(1) == LastChain
7025                            ? dyn_cast<Instruction>(RetI->getOperand(0))
7026                            : dyn_cast<Instruction>(RetI->getOperand(1));
7027 
7028   VectorTy = VectorType::get(I->getOperand(0)->getType(), VectorTy);
7029 
7030   Instruction *Op0, *Op1;
7031   if (RedOp &&
7032       match(RedOp,
7033             m_ZExtOrSExt(m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) &&
7034       match(Op0, m_ZExtOrSExt(m_Value())) &&
7035       Op0->getOpcode() == Op1->getOpcode() &&
7036       Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7037       !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1) &&
7038       (Op0->getOpcode() == RedOp->getOpcode() || Op0 == Op1)) {
7039 
7040     // Matched reduce(ext(mul(ext(A), ext(B))))
7041     // Note that the extend opcodes need to all match, or if A==B they will have
7042     // been converted to zext(mul(sext(A), sext(A))) as it is known positive,
7043     // which is equally fine.
7044     bool IsUnsigned = isa<ZExtInst>(Op0);
7045     auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7046     auto *MulType = VectorType::get(Op0->getType(), VectorTy);
7047 
7048     InstructionCost ExtCost =
7049         TTI.getCastInstrCost(Op0->getOpcode(), MulType, ExtType,
7050                              TTI::CastContextHint::None, CostKind, Op0);
7051     InstructionCost MulCost =
7052         TTI.getArithmeticInstrCost(Instruction::Mul, MulType, CostKind);
7053     InstructionCost Ext2Cost =
7054         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, MulType,
7055                              TTI::CastContextHint::None, CostKind, RedOp);
7056 
7057     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7058         /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7059         CostKind);
7060 
7061     if (RedCost.isValid() &&
7062         RedCost < ExtCost * 2 + MulCost + Ext2Cost + BaseCost)
7063       return I == RetI ? RedCost : 0;
7064   } else if (RedOp && match(RedOp, m_ZExtOrSExt(m_Value())) &&
7065              !TheLoop->isLoopInvariant(RedOp)) {
7066     // Matched reduce(ext(A))
7067     bool IsUnsigned = isa<ZExtInst>(RedOp);
7068     auto *ExtType = VectorType::get(RedOp->getOperand(0)->getType(), VectorTy);
7069     InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7070         /*IsMLA=*/false, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7071         CostKind);
7072 
7073     InstructionCost ExtCost =
7074         TTI.getCastInstrCost(RedOp->getOpcode(), VectorTy, ExtType,
7075                              TTI::CastContextHint::None, CostKind, RedOp);
7076     if (RedCost.isValid() && RedCost < BaseCost + ExtCost)
7077       return I == RetI ? RedCost : 0;
7078   } else if (RedOp &&
7079              match(RedOp, m_Mul(m_Instruction(Op0), m_Instruction(Op1)))) {
7080     if (match(Op0, m_ZExtOrSExt(m_Value())) &&
7081         Op0->getOpcode() == Op1->getOpcode() &&
7082         Op0->getOperand(0)->getType() == Op1->getOperand(0)->getType() &&
7083         !TheLoop->isLoopInvariant(Op0) && !TheLoop->isLoopInvariant(Op1)) {
7084       bool IsUnsigned = isa<ZExtInst>(Op0);
7085       auto *ExtType = VectorType::get(Op0->getOperand(0)->getType(), VectorTy);
7086       // Matched reduce(mul(ext, ext))
7087       InstructionCost ExtCost =
7088           TTI.getCastInstrCost(Op0->getOpcode(), VectorTy, ExtType,
7089                                TTI::CastContextHint::None, CostKind, Op0);
7090       InstructionCost MulCost =
7091           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7092 
7093       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7094           /*IsMLA=*/true, IsUnsigned, RdxDesc.getRecurrenceType(), ExtType,
7095           CostKind);
7096 
7097       if (RedCost.isValid() && RedCost < ExtCost * 2 + MulCost + BaseCost)
7098         return I == RetI ? RedCost : 0;
7099     } else if (!match(I, m_ZExtOrSExt(m_Value()))) {
7100       // Matched reduce(mul())
7101       InstructionCost MulCost =
7102           TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7103 
7104       InstructionCost RedCost = TTI.getExtendedAddReductionCost(
7105           /*IsMLA=*/true, true, RdxDesc.getRecurrenceType(), VectorTy,
7106           CostKind);
7107 
7108       if (RedCost.isValid() && RedCost < MulCost + BaseCost)
7109         return I == RetI ? RedCost : 0;
7110     }
7111   }
7112 
7113   return I == RetI ? Optional<InstructionCost>(BaseCost) : None;
7114 }
7115 
7116 InstructionCost
7117 LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
7118                                                      ElementCount VF) {
7119   // Calculate scalar cost only. Vectorization cost should be ready at this
7120   // moment.
7121   if (VF.isScalar()) {
7122     Type *ValTy = getLoadStoreType(I);
7123     const Align Alignment = getLoadStoreAlignment(I);
7124     unsigned AS = getLoadStoreAddressSpace(I);
7125 
7126     return TTI.getAddressComputationCost(ValTy) +
7127            TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
7128                                TTI::TCK_RecipThroughput, I);
7129   }
7130   return getWideningCost(I, VF);
7131 }
7132 
7133 LoopVectorizationCostModel::VectorizationCostTy
7134 LoopVectorizationCostModel::getInstructionCost(Instruction *I,
7135                                                ElementCount VF) {
7136   // If we know that this instruction will remain uniform, check the cost of
7137   // the scalar version.
7138   if (isUniformAfterVectorization(I, VF))
7139     VF = ElementCount::getFixed(1);
7140 
7141   if (VF.isVector() && isProfitableToScalarize(I, VF))
7142     return VectorizationCostTy(InstsToScalarize[VF][I], false);
7143 
7144   // Forced scalars do not have any scalarization overhead.
7145   auto ForcedScalar = ForcedScalars.find(VF);
7146   if (VF.isVector() && ForcedScalar != ForcedScalars.end()) {
7147     auto InstSet = ForcedScalar->second;
7148     if (InstSet.count(I))
7149       return VectorizationCostTy(
7150           (getInstructionCost(I, ElementCount::getFixed(1)).first *
7151            VF.getKnownMinValue()),
7152           false);
7153   }
7154 
7155   Type *VectorTy;
7156   InstructionCost C = getInstructionCost(I, VF, VectorTy);
7157 
7158   bool TypeNotScalarized = false;
7159   if (VF.isVector() && VectorTy->isVectorTy()) {
7160     unsigned NumParts = TTI.getNumberOfParts(VectorTy);
7161     if (NumParts)
7162       TypeNotScalarized = NumParts < VF.getKnownMinValue();
7163     else
7164       C = InstructionCost::getInvalid();
7165   }
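  // Note: TypeNotScalarized is true when legalization splits VectorTy into
  // fewer pieces than there are lanes, i.e. the instruction stays vectorized.
  // For instance, with VF = 8 and i32 elements on a target with 128-bit
  // vector registers, <8 x i32> legalizes to 2 parts (2 < 8). If the type
  // cannot be legalized at all (0 parts), the cost is marked invalid.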
7166   return VectorizationCostTy(C, TypeNotScalarized);
7167 }
7168 
7169 InstructionCost
7170 LoopVectorizationCostModel::getScalarizationOverhead(Instruction *I,
7171                                                      ElementCount VF) const {
7172 
7173   // There is no mechanism yet to create a scalable scalarization loop,
7174   // so this is currently Invalid.
7175   if (VF.isScalable())
7176     return InstructionCost::getInvalid();
7177 
7178   if (VF.isScalar())
7179     return 0;
7180 
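  // The overhead below models the insertelement instructions needed to build
  // the vector result (for non-void instructions) plus the extractelement
  // instructions needed to feed scalarized operands. For example, a
  // scalarized store with VF = 4 needs up to 4 extracts of the stored value
  // but no result inserts, unless the target supports efficient vector
  // element load/store.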
7181   InstructionCost Cost = 0;
7182   Type *RetTy = ToVectorTy(I->getType(), VF);
7183   if (!RetTy->isVoidTy() &&
7184       (!isa<LoadInst>(I) || !TTI.supportsEfficientVectorElementLoadStore()))
7185     Cost += TTI.getScalarizationOverhead(
7186         cast<VectorType>(RetTy), APInt::getAllOnes(VF.getKnownMinValue()), true,
7187         false);
7188 
7189   // Some targets keep addresses scalar.
7190   if (isa<LoadInst>(I) && !TTI.prefersVectorizedAddressing())
7191     return Cost;
7192 
7193   // Some targets support efficient element stores.
7194   if (isa<StoreInst>(I) && TTI.supportsEfficientVectorElementLoadStore())
7195     return Cost;
7196 
7197   // Collect operands to consider.
7198   CallInst *CI = dyn_cast<CallInst>(I);
7199   Instruction::op_range Ops = CI ? CI->args() : I->operands();
7200 
7201   // Skip operands that do not require extraction/scalarization and do not incur
7202   // any overhead.
7203   SmallVector<Type *> Tys;
7204   for (auto *V : filterExtractingOperands(Ops, VF))
7205     Tys.push_back(MaybeVectorizeType(V->getType(), VF));
7206   return Cost + TTI.getOperandsScalarizationOverhead(
7207                     filterExtractingOperands(Ops, VF), Tys);
7208 }
7209 
7210 void LoopVectorizationCostModel::setCostBasedWideningDecision(ElementCount VF) {
7211   if (VF.isScalar())
7212     return;
7213   NumPredStores = 0;
7214   for (BasicBlock *BB : TheLoop->blocks()) {
7215     // For each instruction in the old loop.
7216     for (Instruction &I : *BB) {
7217       Value *Ptr = getLoadStorePointerOperand(&I);
7218       if (!Ptr)
7219         continue;
7220 
7221       // TODO: We should generate better code and update the cost model for
7222       // predicated uniform stores. Today they are treated as any other
7223       // predicated store (see added test cases in
7224       // invariant-store-vectorization.ll).
7225       if (isa<StoreInst>(&I) && isScalarWithPredication(&I))
7226         NumPredStores++;
7227 
7228       if (Legal->isUniformMemOp(I)) {
7229         // TODO: Avoid replicating loads and stores instead of
7230         // relying on instcombine to remove them.
7231         // Load: Scalar load + broadcast
7232         // Store: Scalar store + isLoopInvariantStoreValue ? 0 : extract
7233         InstructionCost Cost;
7234         if (isa<StoreInst>(&I) && VF.isScalable() &&
7235             isLegalGatherOrScatter(&I)) {
7236           Cost = getGatherScatterCost(&I, VF);
7237           setWideningDecision(&I, VF, CM_GatherScatter, Cost);
7238         } else {
7239           assert((isa<LoadInst>(&I) || !VF.isScalable()) &&
7240                  "Cannot yet scalarize uniform stores");
7241           Cost = getUniformMemOpCost(&I, VF);
7242           setWideningDecision(&I, VF, CM_Scalarize, Cost);
7243         }
7244         continue;
7245       }
7246 
7247       // We assume that widening is the best solution when possible.
7248       if (memoryInstructionCanBeWidened(&I, VF)) {
7249         InstructionCost Cost = getConsecutiveMemOpCost(&I, VF);
7250         int ConsecutiveStride = Legal->isConsecutivePtr(
7251             getLoadStoreType(&I), getLoadStorePointerOperand(&I));
7252         assert((ConsecutiveStride == 1 || ConsecutiveStride == -1) &&
7253                "Expected consecutive stride.");
7254         InstWidening Decision =
7255             ConsecutiveStride == 1 ? CM_Widen : CM_Widen_Reverse;
7256         setWideningDecision(&I, VF, Decision, Cost);
7257         continue;
7258       }
7259 
7260       // Choose between Interleaving, Gather/Scatter or Scalarization.
7261       InstructionCost InterleaveCost = InstructionCost::getInvalid();
7262       unsigned NumAccesses = 1;
7263       if (isAccessInterleaved(&I)) {
7264         auto Group = getInterleavedAccessGroup(&I);
7265         assert(Group && "Fail to get an interleaved access group.");
7266 
7267         // Make one decision for the whole group.
7268         if (getWideningDecision(&I, VF) != CM_Unknown)
7269           continue;
7270 
7271         NumAccesses = Group->getNumMembers();
7272         if (interleavedAccessCanBeWidened(&I, VF))
7273           InterleaveCost = getInterleaveGroupCost(&I, VF);
7274       }
7275 
7276       InstructionCost GatherScatterCost =
7277           isLegalGatherOrScatter(&I)
7278               ? getGatherScatterCost(&I, VF) * NumAccesses
7279               : InstructionCost::getInvalid();
7280 
7281       InstructionCost ScalarizationCost =
7282           getMemInstScalarizationCost(&I, VF) * NumAccesses;
7283 
7284       // Choose the better solution for the current VF, record this
7285       // decision, and use it during vectorization.
7286       InstructionCost Cost;
7287       InstWidening Decision;
7288       if (InterleaveCost <= GatherScatterCost &&
7289           InterleaveCost < ScalarizationCost) {
7290         Decision = CM_Interleave;
7291         Cost = InterleaveCost;
7292       } else if (GatherScatterCost < ScalarizationCost) {
7293         Decision = CM_GatherScatter;
7294         Cost = GatherScatterCost;
7295       } else {
7296         Decision = CM_Scalarize;
7297         Cost = ScalarizationCost;
7298       }
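      // Note that interleaving wins a tie with gather/scatter but must be
      // strictly cheaper than scalarization, while gather/scatter loses a tie
      // with scalarization.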
7299       // If the instruction belongs to an interleave group, the whole group
7300       // receives the same decision. The whole group receives the cost, but
7301       // the cost will actually be assigned to one instruction.
7302       if (auto Group = getInterleavedAccessGroup(&I))
7303         setWideningDecision(Group, VF, Decision, Cost);
7304       else
7305         setWideningDecision(&I, VF, Decision, Cost);
7306     }
7307   }
7308 
7309   // Make sure that any load of an address and any other address computation
7310   // remains scalar unless there is gather/scatter support. This avoids
7311   // inevitable extracts into address registers, and also has the benefit of
7312   // activating LSR more, since that pass can't optimize vectorized
7313   // addresses.
7314   if (TTI.prefersVectorizedAddressing())
7315     return;
7316 
7317   // Start with all scalar pointer uses.
7318   SmallPtrSet<Instruction *, 8> AddrDefs;
7319   for (BasicBlock *BB : TheLoop->blocks())
7320     for (Instruction &I : *BB) {
7321       Instruction *PtrDef =
7322         dyn_cast_or_null<Instruction>(getLoadStorePointerOperand(&I));
7323       if (PtrDef && TheLoop->contains(PtrDef) &&
7324           getWideningDecision(&I, VF) != CM_GatherScatter)
7325         AddrDefs.insert(PtrDef);
7326     }
7327 
7328   // Add all instructions used to generate the addresses.
7329   SmallVector<Instruction *, 4> Worklist;
7330   append_range(Worklist, AddrDefs);
7331   while (!Worklist.empty()) {
7332     Instruction *I = Worklist.pop_back_val();
7333     for (auto &Op : I->operands())
7334       if (auto *InstOp = dyn_cast<Instruction>(Op))
7335         if ((InstOp->getParent() == I->getParent()) && !isa<PHINode>(InstOp) &&
7336             AddrDefs.insert(InstOp).second)
7337           Worklist.push_back(InstOp);
7338   }
7339 
7340   for (auto *I : AddrDefs) {
7341     if (isa<LoadInst>(I)) {
7342       // Setting the desired widening decision should ideally be handled by
7343       // the cost functions, but since this involves finding out whether the
7344       // loaded register is involved in an address computation, it is
7345       // instead changed here once we know this is the case.
7346       InstWidening Decision = getWideningDecision(I, VF);
7347       if (Decision == CM_Widen || Decision == CM_Widen_Reverse)
7348         // Scalarize a widened load of address.
7349         setWideningDecision(
7350             I, VF, CM_Scalarize,
7351             (VF.getKnownMinValue() *
7352              getMemoryInstructionCost(I, ElementCount::getFixed(1))));
7353       else if (auto Group = getInterleavedAccessGroup(I)) {
7354         // Scalarize an interleave group of address loads.
7355         for (unsigned I = 0; I < Group->getFactor(); ++I) {
7356           if (Instruction *Member = Group->getMember(I))
7357             setWideningDecision(
7358                 Member, VF, CM_Scalarize,
7359                 (VF.getKnownMinValue() *
7360                  getMemoryInstructionCost(Member, ElementCount::getFixed(1))));
7361         }
7362       }
7363     } else
7364       // Make sure I gets scalarized and receives a cost estimate without
7365       // scalarization overhead.
7366       ForcedScalars[VF].insert(I);
7367   }
7368 }
7369 
7370 InstructionCost
7371 LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
7372                                                Type *&VectorTy) {
7373   Type *RetTy = I->getType();
7374   if (canTruncateToMinimalBitwidth(I, VF))
7375     RetTy = IntegerType::get(RetTy->getContext(), MinBWs[I]);
7376   auto SE = PSE.getSE();
7377   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
7378 
7379   auto hasSingleCopyAfterVectorization = [this](Instruction *I,
7380                                                 ElementCount VF) -> bool {
7381     if (VF.isScalar())
7382       return true;
7383 
7384     auto Scalarized = InstsToScalarize.find(VF);
7385     assert(Scalarized != InstsToScalarize.end() &&
7386            "VF not yet analyzed for scalarization profitability");
7387     return !Scalarized->second.count(I) &&
7388            llvm::all_of(I->users(), [&](User *U) {
7389              auto *UI = cast<Instruction>(U);
7390              return !Scalarized->second.count(UI);
7391            });
7392   };
7393   (void) hasSingleCopyAfterVectorization;
7394 
7395   if (isScalarAfterVectorization(I, VF)) {
7396     // With the exception of GEPs and PHIs, after scalarization there should
7397     // only be one copy of the instruction generated in the loop. This is
7398     // because the VF is either 1, or any instructions that need scalarizing
7399     // have already been dealt with by the time we get here. As a result,
7400     // we don't have to multiply the instruction cost by VF.
7401     assert(I->getOpcode() == Instruction::GetElementPtr ||
7402            I->getOpcode() == Instruction::PHI ||
7403            (I->getOpcode() == Instruction::BitCast &&
7404             I->getType()->isPointerTy()) ||
7405            hasSingleCopyAfterVectorization(I, VF));
7406     VectorTy = RetTy;
7407   } else
7408     VectorTy = ToVectorTy(RetTy, VF);
7409 
7410   // TODO: We need to estimate the cost of intrinsic calls.
7411   switch (I->getOpcode()) {
7412   case Instruction::GetElementPtr:
7413     // We mark this instruction as zero-cost because the cost of GEPs in
7414     // vectorized code depends on whether the corresponding memory instruction
7415     // is scalarized or not. Therefore, we handle GEPs with the memory
7416     // instruction cost.
7417     return 0;
7418   case Instruction::Br: {
7419     // In cases of scalarized and predicated instructions, there will be VF
7420     // predicated blocks in the vectorized loop. Each branch around these
7421     // blocks also requires an extract of its vector compare i1 element.
7422     bool ScalarPredicatedBB = false;
7423     BranchInst *BI = cast<BranchInst>(I);
7424     if (VF.isVector() && BI->isConditional() &&
7425         (PredicatedBBsAfterVectorization.count(BI->getSuccessor(0)) ||
7426          PredicatedBBsAfterVectorization.count(BI->getSuccessor(1))))
7427       ScalarPredicatedBB = true;
7428 
7429     if (ScalarPredicatedBB) {
7430       // It is not possible to scalarize a scalable vector with predicated instructions.
7431       if (VF.isScalable())
7432         return InstructionCost::getInvalid();
7433       // Return cost for branches around scalarized and predicated blocks.
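      // For a fixed VF this amounts to VF scalar branch costs plus the cost
      // of extracting the VF i1 lanes of the compare vector that feed them,
      // e.g. 4 branches and 4 lane extracts for VF = 4.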
7434       auto *Vec_i1Ty =
7435           VectorType::get(IntegerType::getInt1Ty(RetTy->getContext()), VF);
7436       return (
7437           TTI.getScalarizationOverhead(
7438               Vec_i1Ty, APInt::getAllOnes(VF.getFixedValue()), false, true) +
7439           (TTI.getCFInstrCost(Instruction::Br, CostKind) * VF.getFixedValue()));
7440     } else if (I->getParent() == TheLoop->getLoopLatch() || VF.isScalar())
7441       // The back-edge branch will remain, as will all scalar branches.
7442       return TTI.getCFInstrCost(Instruction::Br, CostKind);
7443     else
7444       // This branch will be eliminated by if-conversion.
7445       return 0;
7446     // Note: We currently assume zero cost for an unconditional branch inside
7447     // a predicated block since it will become a fall-through, although we
7448     // may decide in the future to call TTI for all branches.
7449   }
7450   case Instruction::PHI: {
7451     auto *Phi = cast<PHINode>(I);
7452 
7453     // First-order recurrences are replaced by vector shuffles inside the loop.
7454     // NOTE: Don't use ToVectorTy as SK_ExtractSubvector expects a vector type.
7455     if (VF.isVector() && Legal->isFirstOrderRecurrence(Phi))
7456       return TTI.getShuffleCost(
7457           TargetTransformInfo::SK_ExtractSubvector, cast<VectorType>(VectorTy),
7458           None, VF.getKnownMinValue() - 1, FixedVectorType::get(RetTy, 1));
7459 
7460     // Phi nodes in non-header blocks (not inductions, reductions, etc.) are
7461     // converted into select instructions. We require N - 1 selects per phi
7462     // node, where N is the number of incoming values.
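    // For example, an if-converted phi with three incoming values is costed
    // as two vector selects at this VF.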
7463     if (VF.isVector() && Phi->getParent() != TheLoop->getHeader())
7464       return (Phi->getNumIncomingValues() - 1) *
7465              TTI.getCmpSelInstrCost(
7466                  Instruction::Select, ToVectorTy(Phi->getType(), VF),
7467                  ToVectorTy(Type::getInt1Ty(Phi->getContext()), VF),
7468                  CmpInst::BAD_ICMP_PREDICATE, CostKind);
7469 
7470     return TTI.getCFInstrCost(Instruction::PHI, CostKind);
7471   }
7472   case Instruction::UDiv:
7473   case Instruction::SDiv:
7474   case Instruction::URem:
7475   case Instruction::SRem:
7476     // If we have a predicated instruction, it may not be executed for each
7477     // vector lane. Get the scalarization cost and scale this amount by the
7478     // probability of executing the predicated block. If the instruction is not
7479     // predicated, we fall through to the next case.
7480     if (VF.isVector() && isScalarWithPredication(I)) {
7481       InstructionCost Cost = 0;
7482 
7483       // These instructions have a non-void type, so account for the phi nodes
7484       // that we will create. This cost is likely to be zero. The phi node
7485       // cost, if any, should be scaled by the block probability because it
7486       // models a copy at the end of each predicated block.
7487       Cost += VF.getKnownMinValue() *
7488               TTI.getCFInstrCost(Instruction::PHI, CostKind);
7489 
7490       // The cost of the non-predicated instruction.
7491       Cost += VF.getKnownMinValue() *
7492               TTI.getArithmeticInstrCost(I->getOpcode(), RetTy, CostKind);
7493 
7494       // The cost of insertelement and extractelement instructions needed for
7495       // scalarization.
7496       Cost += getScalarizationOverhead(I, VF);
7497 
7498       // Scale the cost by the probability of executing the predicated blocks.
7499       // This assumes the predicated block for each vector lane is equally
7500       // likely.
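      // The division by getReciprocalPredBlockProb() below is a fixed
      // heuristic rather than a profile-based estimate, so the predicated
      // block is charged at a fixed fraction of its full cost.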
7501       return Cost / getReciprocalPredBlockProb();
7502     }
7503     LLVM_FALLTHROUGH;
7504   case Instruction::Add:
7505   case Instruction::FAdd:
7506   case Instruction::Sub:
7507   case Instruction::FSub:
7508   case Instruction::Mul:
7509   case Instruction::FMul:
7510   case Instruction::FDiv:
7511   case Instruction::FRem:
7512   case Instruction::Shl:
7513   case Instruction::LShr:
7514   case Instruction::AShr:
7515   case Instruction::And:
7516   case Instruction::Or:
7517   case Instruction::Xor: {
7518     // Since we will replace the stride by 1 the multiplication should go away.
7519     if (I->getOpcode() == Instruction::Mul && isStrideMul(I, Legal))
7520       return 0;
7521 
7522     // Detect reduction patterns
7523     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7524       return *RedCost;
7525 
7526     // Certain instructions can be cheaper to vectorize if they have a constant
7527     // second vector operand. One example of this is shifts on x86.
7528     Value *Op2 = I->getOperand(1);
7529     TargetTransformInfo::OperandValueProperties Op2VP;
7530     TargetTransformInfo::OperandValueKind Op2VK =
7531         TTI.getOperandInfo(Op2, Op2VP);
7532     if (Op2VK == TargetTransformInfo::OK_AnyValue && Legal->isUniform(Op2))
7533       Op2VK = TargetTransformInfo::OK_UniformValue;
7534 
7535     SmallVector<const Value *, 4> Operands(I->operand_values());
7536     return TTI.getArithmeticInstrCost(
7537         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7538         Op2VK, TargetTransformInfo::OP_None, Op2VP, Operands, I);
7539   }
7540   case Instruction::FNeg: {
7541     return TTI.getArithmeticInstrCost(
7542         I->getOpcode(), VectorTy, CostKind, TargetTransformInfo::OK_AnyValue,
7543         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None,
7544         TargetTransformInfo::OP_None, I->getOperand(0), I);
7545   }
7546   case Instruction::Select: {
7547     SelectInst *SI = cast<SelectInst>(I);
7548     const SCEV *CondSCEV = SE->getSCEV(SI->getCondition());
7549     bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop));
7550 
7551     const Value *Op0, *Op1;
7552     using namespace llvm::PatternMatch;
7553     if (!ScalarCond && (match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
7554                         match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))))) {
7555       // select x, y, false --> x & y
7556       // select x, true, y --> x | y
7557       TTI::OperandValueProperties Op1VP = TTI::OP_None;
7558       TTI::OperandValueProperties Op2VP = TTI::OP_None;
7559       TTI::OperandValueKind Op1VK = TTI::getOperandInfo(Op0, Op1VP);
7560       TTI::OperandValueKind Op2VK = TTI::getOperandInfo(Op1, Op2VP);
7561       assert(Op0->getType()->getScalarSizeInBits() == 1 &&
7562               Op1->getType()->getScalarSizeInBits() == 1);
7563 
7564       SmallVector<const Value *, 2> Operands{Op0, Op1};
7565       return TTI.getArithmeticInstrCost(
7566           match(I, m_LogicalOr()) ? Instruction::Or : Instruction::And, VectorTy,
7567           CostKind, Op1VK, Op2VK, Op1VP, Op2VP, Operands, I);
7568     }
7569 
7570     Type *CondTy = SI->getCondition()->getType();
7571     if (!ScalarCond)
7572       CondTy = VectorType::get(CondTy, VF);
7573     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy,
7574                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7575   }
7576   case Instruction::ICmp:
7577   case Instruction::FCmp: {
7578     Type *ValTy = I->getOperand(0)->getType();
7579     Instruction *Op0AsInstruction = dyn_cast<Instruction>(I->getOperand(0));
7580     if (canTruncateToMinimalBitwidth(Op0AsInstruction, VF))
7581       ValTy = IntegerType::get(ValTy->getContext(), MinBWs[Op0AsInstruction]);
7582     VectorTy = ToVectorTy(ValTy, VF);
7583     return TTI.getCmpSelInstrCost(I->getOpcode(), VectorTy, nullptr,
7584                                   CmpInst::BAD_ICMP_PREDICATE, CostKind, I);
7585   }
7586   case Instruction::Store:
7587   case Instruction::Load: {
7588     ElementCount Width = VF;
7589     if (Width.isVector()) {
7590       InstWidening Decision = getWideningDecision(I, Width);
7591       assert(Decision != CM_Unknown &&
7592              "CM decision should be taken at this point");
7593       if (Decision == CM_Scalarize)
7594         Width = ElementCount::getFixed(1);
7595     }
7596     VectorTy = ToVectorTy(getLoadStoreType(I), Width);
7597     return getMemoryInstructionCost(I, VF);
7598   }
7599   case Instruction::BitCast:
7600     if (I->getType()->isPointerTy())
7601       return 0;
7602     LLVM_FALLTHROUGH;
7603   case Instruction::ZExt:
7604   case Instruction::SExt:
7605   case Instruction::FPToUI:
7606   case Instruction::FPToSI:
7607   case Instruction::FPExt:
7608   case Instruction::PtrToInt:
7609   case Instruction::IntToPtr:
7610   case Instruction::SIToFP:
7611   case Instruction::UIToFP:
7612   case Instruction::Trunc:
7613   case Instruction::FPTrunc: {
7614     // Computes the CastContextHint from a Load/Store instruction.
7615     auto ComputeCCH = [&](Instruction *I) -> TTI::CastContextHint {
7616       assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
7617              "Expected a load or a store!");
7618 
7619       if (VF.isScalar() || !TheLoop->contains(I))
7620         return TTI::CastContextHint::Normal;
7621 
7622       switch (getWideningDecision(I, VF)) {
7623       case LoopVectorizationCostModel::CM_GatherScatter:
7624         return TTI::CastContextHint::GatherScatter;
7625       case LoopVectorizationCostModel::CM_Interleave:
7626         return TTI::CastContextHint::Interleave;
7627       case LoopVectorizationCostModel::CM_Scalarize:
7628       case LoopVectorizationCostModel::CM_Widen:
7629         return Legal->isMaskRequired(I) ? TTI::CastContextHint::Masked
7630                                         : TTI::CastContextHint::Normal;
7631       case LoopVectorizationCostModel::CM_Widen_Reverse:
7632         return TTI::CastContextHint::Reversed;
7633       case LoopVectorizationCostModel::CM_Unknown:
7634         llvm_unreachable("Instr did not go through cost modelling?");
7635       }
7636 
7637       llvm_unreachable("Unhandled case!");
7638     };
7639 
7640     unsigned Opcode = I->getOpcode();
7641     TTI::CastContextHint CCH = TTI::CastContextHint::None;
7642     // For Trunc, the context is the only user, which must be a StoreInst.
7643     if (Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) {
7644       if (I->hasOneUse())
7645         if (StoreInst *Store = dyn_cast<StoreInst>(*I->user_begin()))
7646           CCH = ComputeCCH(Store);
7647     }
7648     // For Z/Sext, the context is the operand, which must be a LoadInst.
7649     else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt ||
7650              Opcode == Instruction::FPExt) {
7651       if (LoadInst *Load = dyn_cast<LoadInst>(I->getOperand(0)))
7652         CCH = ComputeCCH(Load);
7653     }
7654 
7655     // We optimize the truncation of induction variables having constant
7656     // integer steps. The cost of these truncations is the same as the scalar
7657     // operation.
7658     if (isOptimizableIVTruncate(I, VF)) {
7659       auto *Trunc = cast<TruncInst>(I);
7660       return TTI.getCastInstrCost(Instruction::Trunc, Trunc->getDestTy(),
7661                                   Trunc->getSrcTy(), CCH, CostKind, Trunc);
7662     }
7663 
7664     // Detect reduction patterns
7665     if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7666       return *RedCost;
7667 
7668     Type *SrcScalarTy = I->getOperand(0)->getType();
7669     Type *SrcVecTy =
7670         VectorTy->isVectorTy() ? ToVectorTy(SrcScalarTy, VF) : SrcScalarTy;
7671     if (canTruncateToMinimalBitwidth(I, VF)) {
7672       // This cast is going to be shrunk. This may remove the cast or it might
7673       // turn it into a slightly different cast. For example, if MinBW == 16,
7674       // "zext i8 %1 to i32" becomes "zext i8 %1 to i16".
7675       //
7676       // Calculate the modified src and dest types.
7677       Type *MinVecTy = VectorTy;
7678       if (Opcode == Instruction::Trunc) {
7679         SrcVecTy = smallestIntegerVectorType(SrcVecTy, MinVecTy);
7680         VectorTy =
7681             largestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7682       } else if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
7683         SrcVecTy = largestIntegerVectorType(SrcVecTy, MinVecTy);
7684         VectorTy =
7685             smallestIntegerVectorType(ToVectorTy(I->getType(), VF), MinVecTy);
7686       }
7687     }
7688 
7689     return TTI.getCastInstrCost(Opcode, VectorTy, SrcVecTy, CCH, CostKind, I);
7690   }
7691   case Instruction::Call: {
7692     if (RecurrenceDescriptor::isFMulAddIntrinsic(I))
7693       if (auto RedCost = getReductionPatternCost(I, VF, VectorTy, CostKind))
7694         return *RedCost;
7695     bool NeedToScalarize;
7696     CallInst *CI = cast<CallInst>(I);
7697     InstructionCost CallCost = getVectorCallCost(CI, VF, NeedToScalarize);
7698     if (getVectorIntrinsicIDForCall(CI, TLI)) {
7699       InstructionCost IntrinsicCost = getVectorIntrinsicCost(CI, VF);
7700       return std::min(CallCost, IntrinsicCost);
7701     }
7702     return CallCost;
7703   }
7704   case Instruction::ExtractValue:
7705     return TTI.getInstructionCost(I, TTI::TCK_RecipThroughput);
7706   case Instruction::Alloca:
7707     // We cannot easily widen alloca to a scalable alloca, as
7708     // the result would need to be a vector of pointers.
7709     if (VF.isScalable())
7710       return InstructionCost::getInvalid();
7711     LLVM_FALLTHROUGH;
7712   default:
7713     // This opcode is unknown. Assume that it is the same as 'mul'.
7714     return TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
7715   } // end of switch.
7716 }
7717 
7718 char LoopVectorize::ID = 0;
7719 
7720 static const char lv_name[] = "Loop Vectorization";
7721 
7722 INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false)
7723 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
7724 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
7725 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
7726 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
7727 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
7728 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
7729 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
7730 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
7731 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
7732 INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
7733 INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
7734 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
7735 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
7736 INITIALIZE_PASS_DEPENDENCY(InjectTLIMappingsLegacy)
7737 INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false)
7738 
7739 namespace llvm {
7740 
7741 Pass *createLoopVectorizePass() { return new LoopVectorize(); }
7742 
7743 Pass *createLoopVectorizePass(bool InterleaveOnlyWhenForced,
7744                               bool VectorizeOnlyWhenForced) {
7745   return new LoopVectorize(InterleaveOnlyWhenForced, VectorizeOnlyWhenForced);
7746 }
7747 
7748 } // end namespace llvm
7749 
7750 bool LoopVectorizationCostModel::isConsecutiveLoadOrStore(Instruction *Inst) {
7751   // Check if the pointer operand of a load or store instruction is
7752   // consecutive.
7753   if (auto *Ptr = getLoadStorePointerOperand(Inst))
7754     return Legal->isConsecutivePtr(getLoadStoreType(Inst), Ptr);
7755   return false;
7756 }
7757 
7758 void LoopVectorizationCostModel::collectValuesToIgnore() {
7759   // Ignore ephemeral values.
7760   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
7761 
7762   // Ignore type-promoting instructions we identified during reduction
7763   // detection.
7764   for (auto &Reduction : Legal->getReductionVars()) {
7765     RecurrenceDescriptor &RedDes = Reduction.second;
7766     const SmallPtrSetImpl<Instruction *> &Casts = RedDes.getCastInsts();
7767     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7768   }
7769   // Ignore type-casting instructions we identified during induction
7770   // detection.
7771   for (auto &Induction : Legal->getInductionVars()) {
7772     InductionDescriptor &IndDes = Induction.second;
7773     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
7774     VecValuesToIgnore.insert(Casts.begin(), Casts.end());
7775   }
7776 }
7777 
7778 void LoopVectorizationCostModel::collectInLoopReductions() {
7779   for (auto &Reduction : Legal->getReductionVars()) {
7780     PHINode *Phi = Reduction.first;
7781     RecurrenceDescriptor &RdxDesc = Reduction.second;
7782 
7783     // We don't collect reductions that are type promoted (yet).
7784     if (RdxDesc.getRecurrenceType() != Phi->getType())
7785       continue;
7786 
7787     // If the target would prefer this reduction to happen "in-loop", then we
7788     // want to record it as such.
7789     unsigned Opcode = RdxDesc.getOpcode();
7790     if (!PreferInLoopReductions && !useOrderedReductions(RdxDesc) &&
7791         !TTI.preferInLoopReduction(Opcode, Phi->getType(),
7792                                    TargetTransformInfo::ReductionFlags()))
7793       continue;
7794 
7795     // Check that we can correctly put the reductions into the loop, by
7796     // finding the chain of operations that leads from the phi to the loop
7797     // exit value.
7798     SmallVector<Instruction *, 4> ReductionOperations =
7799         RdxDesc.getReductionOpChain(Phi, TheLoop);
7800     bool InLoop = !ReductionOperations.empty();
7801     if (InLoop) {
7802       InLoopReductionChains[Phi] = ReductionOperations;
7803       // Add the elements to InLoopReductionImmediateChains for cost modelling.
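      // For example, for a chain phi -> %add1 -> %add2 this records
      // %add1 -> phi and %add2 -> %add1, letting the cost model walk each
      // in-loop reduction operation back to its predecessor in the chain.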
7804       Instruction *LastChain = Phi;
7805       for (auto *I : ReductionOperations) {
7806         InLoopReductionImmediateChains[I] = LastChain;
7807         LastChain = I;
7808       }
7809     }
7810     LLVM_DEBUG(dbgs() << "LV: Using " << (InLoop ? "inloop" : "out of loop")
7811                       << " reduction for phi: " << *Phi << "\n");
7812   }
7813 }
7814 
7815 // TODO: we could return a pair of values that specify the max VF and
7816 // min VF, to be used in `buildVPlans(MinVF, MaxVF)` instead of
7817 // `buildVPlans(VF, VF)`. We cannot do that yet because VPlan at the moment
7818 // doesn't have a cost model that can choose which plan to execute if
7819 // more than one is generated.
7820 static unsigned determineVPlanVF(const unsigned WidestVectorRegBits,
7821                                  LoopVectorizationCostModel &CM) {
7822   unsigned WidestType;
7823   std::tie(std::ignore, WidestType) = CM.getSmallestAndWidestTypes();
7824   return WidestVectorRegBits / WidestType;
7825 }
7826 
7827 VectorizationFactor
7828 LoopVectorizationPlanner::planInVPlanNativePath(ElementCount UserVF) {
7829   assert(!UserVF.isScalable() && "scalable vectors not yet supported");
7830   ElementCount VF = UserVF;
7831   // Outer loop handling: outer loops may require CFG and instruction-level
7832   // transformations before even evaluating whether vectorization is profitable.
7833   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
7834   // the vectorization pipeline.
7835   if (!OrigLoop->isInnermost()) {
7836     // If the user doesn't provide a vectorization factor, determine a
7837     // reasonable one.
7838     if (UserVF.isZero()) {
7839       VF = ElementCount::getFixed(determineVPlanVF(
7840           TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
7841               .getFixedSize(),
7842           CM));
7843       LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
7844 
7845       // Make sure we have a VF > 1 for stress testing.
7846       if (VPlanBuildStressTest && (VF.isScalar() || VF.isZero())) {
7847         LLVM_DEBUG(dbgs() << "LV: VPlan stress testing: "
7848                           << "overriding computed VF.\n");
7849         VF = ElementCount::getFixed(4);
7850       }
7851     }
7852     assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
7853     assert(isPowerOf2_32(VF.getKnownMinValue()) &&
7854            "VF needs to be a power of two");
7855     LLVM_DEBUG(dbgs() << "LV: Using " << (!UserVF.isZero() ? "user " : "")
7856                       << "VF " << VF << " to build VPlans.\n");
7857     buildVPlans(VF, VF);
7858 
7859     // For VPlan build stress testing, we bail out after VPlan construction.
7860     if (VPlanBuildStressTest)
7861       return VectorizationFactor::Disabled();
7862 
7863     return {VF, 0 /*Cost*/};
7864   }
7865 
7866   LLVM_DEBUG(
7867       dbgs() << "LV: Not vectorizing. Inner loops aren't supported in the "
7868                 "VPlan-native path.\n");
7869   return VectorizationFactor::Disabled();
7870 }
7871 
7872 Optional<VectorizationFactor>
7873 LoopVectorizationPlanner::plan(ElementCount UserVF, unsigned UserIC) {
7874   assert(OrigLoop->isInnermost() && "Inner loop expected.");
7875   FixedScalableVFPair MaxFactors = CM.computeMaxVF(UserVF, UserIC);
7876   if (!MaxFactors) // Cases that should not be vectorized nor interleaved.
7877     return None;
7878 
7879   // Invalidate interleave groups if all blocks of loop will be predicated.
7880   if (CM.blockNeedsPredicationForAnyReason(OrigLoop->getHeader()) &&
7881       !useMaskedInterleavedAccesses(*TTI)) {
7882     LLVM_DEBUG(
7883         dbgs()
7884         << "LV: Invalidate all interleaved groups due to fold-tail by masking "
7885            "which requires masked-interleaved support.\n");
7886     if (CM.InterleaveInfo.invalidateGroups())
7887       // Invalidating interleave groups also requires invalidating all decisions
7888       // based on them, which includes widening decisions and uniform and scalar
7889       // values.
7890       CM.invalidateCostModelingDecisions();
7891   }
7892 
7893   ElementCount MaxUserVF =
7894       UserVF.isScalable() ? MaxFactors.ScalableVF : MaxFactors.FixedVF;
7895   bool UserVFIsLegal = ElementCount::isKnownLE(UserVF, MaxUserVF);
7896   if (!UserVF.isZero() && UserVFIsLegal) {
7897     assert(isPowerOf2_32(UserVF.getKnownMinValue()) &&
7898            "VF needs to be a power of two");
7899     // Collect the instructions (and their associated costs) that will be more
7900     // profitable to scalarize.
7901     if (CM.selectUserVectorizationFactor(UserVF)) {
7902       LLVM_DEBUG(dbgs() << "LV: Using user VF " << UserVF << ".\n");
7903       CM.collectInLoopReductions();
7904       buildVPlansWithVPRecipes(UserVF, UserVF);
7905       LLVM_DEBUG(printPlans(dbgs()));
7906       return {{UserVF, 0}};
7907     } else
7908       reportVectorizationInfo("UserVF ignored because of invalid costs.",
7909                               "InvalidCost", ORE, OrigLoop);
7910   }
7911 
7912   // Populate the set of Vectorization Factor Candidates.
7913   ElementCountSet VFCandidates;
7914   for (auto VF = ElementCount::getFixed(1);
7915        ElementCount::isKnownLE(VF, MaxFactors.FixedVF); VF *= 2)
7916     VFCandidates.insert(VF);
7917   for (auto VF = ElementCount::getScalable(1);
7918        ElementCount::isKnownLE(VF, MaxFactors.ScalableVF); VF *= 2)
7919     VFCandidates.insert(VF);
7920 
7921   for (const auto &VF : VFCandidates) {
7922     // Collect Uniform and Scalar instructions after vectorization with VF.
7923     CM.collectUniformsAndScalars(VF);
7924 
7925     // Collect the instructions (and their associated costs) that will be more
7926     // profitable to scalarize.
7927     if (VF.isVector())
7928       CM.collectInstsToScalarize(VF);
7929   }
7930 
7931   CM.collectInLoopReductions();
7932   buildVPlansWithVPRecipes(ElementCount::getFixed(1), MaxFactors.FixedVF);
7933   buildVPlansWithVPRecipes(ElementCount::getScalable(1), MaxFactors.ScalableVF);
7934 
7935   LLVM_DEBUG(printPlans(dbgs()));
7936   if (!MaxFactors.hasVector())
7937     return VectorizationFactor::Disabled();
7938 
7939   // Select the optimal vectorization factor.
7940   auto SelectedVF = CM.selectVectorizationFactor(VFCandidates);
7941 
7942   // Check if it is profitable to vectorize with runtime checks.
7943   unsigned NumRuntimePointerChecks = Requirements.getNumRuntimePointerChecks();
7944   if (SelectedVF.Width.getKnownMinValue() > 1 && NumRuntimePointerChecks) {
7945     bool PragmaThresholdReached =
7946         NumRuntimePointerChecks > PragmaVectorizeMemoryCheckThreshold;
7947     bool ThresholdReached =
7948         NumRuntimePointerChecks > VectorizerParams::RuntimeMemoryCheckThreshold;
7949     if ((ThresholdReached && !Hints.allowReordering()) ||
7950         PragmaThresholdReached) {
7951       ORE->emit([&]() {
7952         return OptimizationRemarkAnalysisAliasing(
7953                    DEBUG_TYPE, "CantReorderMemOps", OrigLoop->getStartLoc(),
7954                    OrigLoop->getHeader())
7955                << "loop not vectorized: cannot prove it is safe to reorder "
7956                   "memory operations";
7957       });
7958       LLVM_DEBUG(dbgs() << "LV: Too many memory checks needed.\n");
7959       Hints.emitRemarkWithHints();
7960       return VectorizationFactor::Disabled();
7961     }
7962   }
7963   return SelectedVF;
7964 }
7965 
7966 VPlan &LoopVectorizationPlanner::getBestPlanFor(ElementCount VF) const {
7967   assert(count_if(VPlans,
7968                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
7969              1 &&
7970          "Best VF has not a single VPlan.");
7971 
7972   for (const VPlanPtr &Plan : VPlans) {
7973     if (Plan->hasVF(VF))
7974       return *Plan.get();
7975   }
7976   llvm_unreachable("No plan found!");
7977 }
7978 
7979 void LoopVectorizationPlanner::executePlan(ElementCount BestVF, unsigned BestUF,
7980                                            VPlan &BestVPlan,
7981                                            InnerLoopVectorizer &ILV,
7982                                            DominatorTree *DT) {
7983   LLVM_DEBUG(dbgs() << "Executing best plan with VF=" << BestVF << ", UF=" << BestUF
7984                     << '\n');
7985 
7986   // Perform the actual loop transformation.
7987 
7988   // 1. Create a new empty loop. Unlink the old loop and connect the new one.
7989   VPTransformState State{BestVF, BestUF, LI, DT, ILV.Builder, &ILV, &BestVPlan};
7990   State.CFG.PrevBB = ILV.createVectorizedLoopSkeleton();
7991   State.TripCount = ILV.getOrCreateTripCount(nullptr);
7992   State.CanonicalIV = ILV.Induction;
7993   ILV.collectPoisonGeneratingRecipes(State);
7994 
7995   ILV.printDebugTracesAtStart();
7996 
7997   //===------------------------------------------------===//
7998   //
7999   // Notice: any optimization or new instruction that goes
8000   // into the code below should also be implemented in
8001   // the cost model.
8002   //
8003   //===------------------------------------------------===//
8004 
8005   // 2. Copy and widen instructions from the old loop into the new loop.
8006   BestVPlan.execute(&State);
8007 
8008   // 3. Fix the vectorized code: take care of header phis, live-outs,
8009   //    predication, updating analyses.
8010   ILV.fixVectorizedLoop(State);
8011 
8012   ILV.printDebugTracesAtEnd();
8013 }
8014 
8015 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
8016 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
8017   for (const auto &Plan : VPlans)
8018     if (PrintVPlansInDotFormat)
8019       Plan->printDOT(O);
8020     else
8021       Plan->print(O);
8022 }
8023 #endif
8024 
8025 void LoopVectorizationPlanner::collectTriviallyDeadInstructions(
8026     SmallPtrSetImpl<Instruction *> &DeadInstructions) {
8027 
8028   // We create new control flow for the vectorized loop, so an original exit
8029   // condition will be dead after vectorization if it is only used by the
8030   // terminator.
8031   SmallVector<BasicBlock*> ExitingBlocks;
8032   OrigLoop->getExitingBlocks(ExitingBlocks);
8033   for (auto *BB : ExitingBlocks) {
8034     auto *Cmp = dyn_cast<Instruction>(BB->getTerminator()->getOperand(0));
8035     if (!Cmp || !Cmp->hasOneUse())
8036       continue;
8037 
8038     // TODO: we should introduce a getUniqueExitingBlocks on Loop
8039     if (!DeadInstructions.insert(Cmp).second)
8040       continue;
8041 
8042     // An operand of the icmp is often a dead trunc, used by IndUpdate.
8043     // TODO: can recurse through operands in general
8044     for (Value *Op : Cmp->operands()) {
8045       if (isa<TruncInst>(Op) && Op->hasOneUse())
8046           DeadInstructions.insert(cast<Instruction>(Op));
8047     }
8048   }
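  // For example, given an exit test such as
  //   %exitcond = icmp eq i64 %iv.next, %n
  //   br i1 %exitcond, label %exit, label %loop
  // the icmp is only used by the terminator and is recreated for the vector
  // loop, so the original compare (and a single-use trunc feeding it) can be
  // treated as dead.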
8049 
8050   // We create new "steps" for induction variable updates to which the original
8051   // induction variables map. An original update instruction will be dead if
8052   // all its users except the induction variable are dead.
8053   auto *Latch = OrigLoop->getLoopLatch();
8054   for (auto &Induction : Legal->getInductionVars()) {
8055     PHINode *Ind = Induction.first;
8056     auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
8057 
8058     // If the tail is to be folded by masking, the primary induction variable,
8059     // if it exists, isn't dead: it will be used for masking. Don't kill it.
8060     if (CM.foldTailByMasking() && IndUpdate == Legal->getPrimaryInduction())
8061       continue;
8062 
8063     if (llvm::all_of(IndUpdate->users(), [&](User *U) -> bool {
8064           return U == Ind || DeadInstructions.count(cast<Instruction>(U));
8065         }))
8066       DeadInstructions.insert(IndUpdate);
8067 
8068     // We record as "Dead" also the type-casting instructions we had identified
8069     // during induction analysis. We don't need any handling for them in the
8070     // vectorized loop because we have proven that, under a proper runtime
8071     // test guarding the vectorized loop, the value of the phi, and the casted
8072     // value of the phi, are the same. The last instruction in this casting chain
8073     // will get its scalar/vector/widened def from the scalar/vector/widened def
8074     // of the respective phi node. Any other casts in the induction def-use chain
8075     // have no other uses outside the phi update chain, and will be ignored.
8076     InductionDescriptor &IndDes = Induction.second;
8077     const SmallVectorImpl<Instruction *> &Casts = IndDes.getCastInsts();
8078     DeadInstructions.insert(Casts.begin(), Casts.end());
8079   }
8080 }
8081 
8082 Value *InnerLoopUnroller::reverseVector(Value *Vec) { return Vec; }
8083 
8084 Value *InnerLoopUnroller::getBroadcastInstrs(Value *V) { return V; }
8085 
8086 Value *InnerLoopUnroller::getStepVector(Value *Val, Value *StartIdx,
8087                                         Value *Step,
8088                                         Instruction::BinaryOps BinOp) {
8089   // When unrolling and the VF is 1, we only need to add a simple scalar.
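  // The scalar induction value is simply Val + StartIdx * Step (or the
  // corresponding floating-point operation below); no vector lanes need to be
  // materialized.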
8090   Type *Ty = Val->getType();
8091   assert(!Ty->isVectorTy() && "Val must be a scalar");
8092 
8093   if (Ty->isFloatingPointTy()) {
8094     // Floating-point operations inherit FMF via the builder's flags.
8095     Value *MulOp = Builder.CreateFMul(StartIdx, Step);
8096     return Builder.CreateBinOp(BinOp, Val, MulOp);
8097   }
8098   return Builder.CreateAdd(Val, Builder.CreateMul(StartIdx, Step), "induction");
8099 }
8100 
8101 static void AddRuntimeUnrollDisableMetaData(Loop *L) {
8102   SmallVector<Metadata *, 4> MDs;
8103   // Reserve first location for self reference to the LoopID metadata node.
8104   MDs.push_back(nullptr);
8105   bool IsUnrollMetadata = false;
8106   MDNode *LoopID = L->getLoopID();
8107   if (LoopID) {
8108     // First find existing loop unrolling disable metadata.
8109     for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
8110       auto *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
8111       if (MD) {
8112         const auto *S = dyn_cast<MDString>(MD->getOperand(0));
8113         IsUnrollMetadata =
8114             S && S->getString().startswith("llvm.loop.unroll.disable");
8115       }
8116       MDs.push_back(LoopID->getOperand(i));
8117     }
8118   }
8119 
8120   if (!IsUnrollMetadata) {
8121     // Add runtime unroll disable metadata.
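    // The resulting loop ID looks roughly like
    //   !0 = distinct !{!0, <existing operands>, !1}
    //   !1 = !{!"llvm.loop.unroll.runtime.disable"}
    // so later runtime unrolling of the already-vectorized loop is suppressed.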
8122     LLVMContext &Context = L->getHeader()->getContext();
8123     SmallVector<Metadata *, 1> DisableOperands;
8124     DisableOperands.push_back(
8125         MDString::get(Context, "llvm.loop.unroll.runtime.disable"));
8126     MDNode *DisableNode = MDNode::get(Context, DisableOperands);
8127     MDs.push_back(DisableNode);
8128     MDNode *NewLoopID = MDNode::get(Context, MDs);
8129     // Set operand 0 to refer to the loop id itself.
8130     NewLoopID->replaceOperandWith(0, NewLoopID);
8131     L->setLoopID(NewLoopID);
8132   }
8133 }
8134 
8135 //===--------------------------------------------------------------------===//
8136 // EpilogueVectorizerMainLoop
8137 //===--------------------------------------------------------------------===//
8138 
8139 /// This function is partially responsible for generating the control flow
8140 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8141 BasicBlock *EpilogueVectorizerMainLoop::createEpilogueVectorizedLoopSkeleton() {
8142   MDNode *OrigLoopID = OrigLoop->getLoopID();
8143   Loop *Lp = createVectorLoopSkeleton("");
8144 
8145   // Generate the code to check the minimum iteration count of the vector
8146   // epilogue (see below).
8147   EPI.EpilogueIterationCountCheck =
8148       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, true);
8149   EPI.EpilogueIterationCountCheck->setName("iter.check");
8150 
8151   // Generate the code to check any assumptions that we've made for SCEV
8152   // expressions.
8153   EPI.SCEVSafetyCheck = emitSCEVChecks(Lp, LoopScalarPreHeader);
8154 
8155   // Generate the code that checks at runtime if arrays overlap. We put the
8156   // checks into a separate block to make the more common case of few elements
8157   // faster.
8158   EPI.MemSafetyCheck = emitMemRuntimeChecks(Lp, LoopScalarPreHeader);
8159 
8160   // Generate the iteration count check for the main loop, *after* the check
8161   // for the epilogue loop, so that the path-length is shorter for the case
8162   // that goes directly through the vector epilogue. The longer-path length for
8163   // the main loop is compensated for, by the gain from vectorizing the larger
8164   // trip count. Note: the branch will get updated later on when we vectorize
8165   // the epilogue.
8166   EPI.MainLoopIterationCountCheck =
8167       emitMinimumIterationCountCheck(Lp, LoopScalarPreHeader, false);
8168 
8169   // Generate the induction variable.
8170   OldInduction = Legal->getPrimaryInduction();
8171   Type *IdxTy = Legal->getWidestInductionType();
8172   Value *StartIdx = ConstantInt::get(IdxTy, 0);
8173 
8174   IRBuilder<> B(&*Lp->getLoopPreheader()->getFirstInsertionPt());
8175   Value *Step = getRuntimeVF(B, IdxTy, VF * UF);
8176   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8177   EPI.VectorTripCount = CountRoundDown;
8178   Induction =
8179       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8180                               getDebugLocFromInstOrOperands(OldInduction));
8181 
8182   // Skip creating induction resume values here because they will be created in
8183   // the second pass. If we created them here, they wouldn't be used anyway,
8184   // because the VPlan in the second pass still contains the inductions from the
8185   // original loop.
8186 
8187   return completeLoopSkeleton(Lp, OrigLoopID);
8188 }
8189 
8190 void EpilogueVectorizerMainLoop::printDebugTracesAtStart() {
8191   LLVM_DEBUG({
8192     dbgs() << "Create Skeleton for epilogue vectorized loop (first pass)\n"
8193            << "Main Loop VF:" << EPI.MainLoopVF
8194            << ", Main Loop UF:" << EPI.MainLoopUF
8195            << ", Epilogue Loop VF:" << EPI.EpilogueVF
8196            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8197   });
8198 }
8199 
8200 void EpilogueVectorizerMainLoop::printDebugTracesAtEnd() {
8201   DEBUG_WITH_TYPE(VerboseDebug, {
8202     dbgs() << "intermediate fn:\n"
8203            << *OrigLoop->getHeader()->getParent() << "\n";
8204   });
8205 }
8206 
8207 BasicBlock *EpilogueVectorizerMainLoop::emitMinimumIterationCountCheck(
8208     Loop *L, BasicBlock *Bypass, bool ForEpilogue) {
8209   assert(L && "Expected valid Loop.");
8210   assert(Bypass && "Expected valid bypass basic block.");
8211   ElementCount VFactor = ForEpilogue ? EPI.EpilogueVF : VF;
8212   unsigned UFactor = ForEpilogue ? EPI.EpilogueUF : UF;
8213   Value *Count = getOrCreateTripCount(L);
8214   // Reuse the existing vector loop preheader for the TC checks.
8215   // Note that a new preheader block is generated for the vector loop.
8216   BasicBlock *const TCCheckBlock = LoopVectorPreHeader;
8217   IRBuilder<> Builder(TCCheckBlock->getTerminator());
8218 
8219   // Generate code to check if the loop's trip count is less than VF * UF of the
8220   // main vector loop.
8221   auto P = Cost->requiresScalarEpilogue(ForEpilogue ? EPI.EpilogueVF : VF) ?
8222       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
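  // When a scalar epilogue is required we must leave at least one iteration
  // for it, so the bypass is also taken when the trip count equals VF * UF
  // (ULE); otherwise a trip count of exactly VF * UF may still enter the
  // vector loop (ULT).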
8223 
8224   Value *CheckMinIters = Builder.CreateICmp(
8225       P, Count, createStepForVF(Builder, Count->getType(), VFactor, UFactor),
8226       "min.iters.check");
8227 
8228   if (!ForEpilogue)
8229     TCCheckBlock->setName("vector.main.loop.iter.check");
8230 
8231   // Create new preheader for vector loop.
8232   LoopVectorPreHeader = SplitBlock(TCCheckBlock, TCCheckBlock->getTerminator(),
8233                                    DT, LI, nullptr, "vector.ph");
8234 
8235   if (ForEpilogue) {
8236     assert(DT->properlyDominates(DT->getNode(TCCheckBlock),
8237                                  DT->getNode(Bypass)->getIDom()) &&
8238            "TC check is expected to dominate Bypass");
8239 
8240     // Update dominator for Bypass & LoopExit.
8241     DT->changeImmediateDominator(Bypass, TCCheckBlock);
8242     if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8243       // For loops with multiple exits, there's no edge from the middle block
8244       // to exit blocks (as the epilogue must run) and thus no need to update
8245       // the immediate dominator of the exit blocks.
8246       DT->changeImmediateDominator(LoopExitBlock, TCCheckBlock);
8247 
8248     LoopBypassBlocks.push_back(TCCheckBlock);
8249 
8250     // Save the trip count so we don't have to regenerate it in the
8251     // vec.epilog.iter.check. This is safe to do because the trip count
8252     // generated here dominates the vector epilog iter check.
8253     EPI.TripCount = Count;
8254   }
8255 
8256   ReplaceInstWithInst(
8257       TCCheckBlock->getTerminator(),
8258       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8259 
8260   return TCCheckBlock;
8261 }
8262 
8263 //===--------------------------------------------------------------------===//
8264 // EpilogueVectorizerEpilogueLoop
8265 //===--------------------------------------------------------------------===//
8266 
8267 /// This function is partially responsible for generating the control flow
8268 /// depicted in https://llvm.org/docs/Vectorizers.html#epilogue-vectorization.
8269 BasicBlock *
8270 EpilogueVectorizerEpilogueLoop::createEpilogueVectorizedLoopSkeleton() {
8271   MDNode *OrigLoopID = OrigLoop->getLoopID();
8272   Loop *Lp = createVectorLoopSkeleton("vec.epilog.");
8273 
8274   // Now, compare the remaining count and, if there aren't enough iterations to
8275   // execute the vectorized epilogue, skip to the scalar part.
8276   BasicBlock *VecEpilogueIterationCountCheck = LoopVectorPreHeader;
8277   VecEpilogueIterationCountCheck->setName("vec.epilog.iter.check");
8278   LoopVectorPreHeader =
8279       SplitBlock(LoopVectorPreHeader, LoopVectorPreHeader->getTerminator(), DT,
8280                  LI, nullptr, "vec.epilog.ph");
8281   emitMinimumVectorEpilogueIterCountCheck(Lp, LoopScalarPreHeader,
8282                                           VecEpilogueIterationCountCheck);
8283 
8284   // Adjust the control flow taking the state info from the main loop
8285   // vectorization into account.
8286   assert(EPI.MainLoopIterationCountCheck && EPI.EpilogueIterationCountCheck &&
8287          "expected this to be saved from the previous pass.");
8288   EPI.MainLoopIterationCountCheck->getTerminator()->replaceUsesOfWith(
8289       VecEpilogueIterationCountCheck, LoopVectorPreHeader);
8290 
8291   DT->changeImmediateDominator(LoopVectorPreHeader,
8292                                EPI.MainLoopIterationCountCheck);
8293 
8294   EPI.EpilogueIterationCountCheck->getTerminator()->replaceUsesOfWith(
8295       VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8296 
8297   if (EPI.SCEVSafetyCheck)
8298     EPI.SCEVSafetyCheck->getTerminator()->replaceUsesOfWith(
8299         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8300   if (EPI.MemSafetyCheck)
8301     EPI.MemSafetyCheck->getTerminator()->replaceUsesOfWith(
8302         VecEpilogueIterationCountCheck, LoopScalarPreHeader);
8303 
8304   DT->changeImmediateDominator(
8305       VecEpilogueIterationCountCheck,
8306       VecEpilogueIterationCountCheck->getSinglePredecessor());
8307 
8308   DT->changeImmediateDominator(LoopScalarPreHeader,
8309                                EPI.EpilogueIterationCountCheck);
8310   if (!Cost->requiresScalarEpilogue(EPI.EpilogueVF))
8311     // If there is an epilogue which must run, there's no edge from the
8312     // middle block to exit blocks and thus no need to update the immediate
8313     // dominator of the exit blocks.
8314     DT->changeImmediateDominator(LoopExitBlock,
8315                                  EPI.EpilogueIterationCountCheck);
8316 
8317   // Keep track of bypass blocks, as they feed start values to the induction
8318   // phis in the scalar loop preheader.
8319   if (EPI.SCEVSafetyCheck)
8320     LoopBypassBlocks.push_back(EPI.SCEVSafetyCheck);
8321   if (EPI.MemSafetyCheck)
8322     LoopBypassBlocks.push_back(EPI.MemSafetyCheck);
8323   LoopBypassBlocks.push_back(EPI.EpilogueIterationCountCheck);
8324 
8325   // Generate a resume induction for the vector epilogue and put it in the
8326   // vector epilogue preheader.
8327   Type *IdxTy = Legal->getWidestInductionType();
8328   PHINode *EPResumeVal = PHINode::Create(IdxTy, 2, "vec.epilog.resume.val",
8329                                          LoopVectorPreHeader->getFirstNonPHI());
8330   EPResumeVal->addIncoming(EPI.VectorTripCount, VecEpilogueIterationCountCheck);
8331   EPResumeVal->addIncoming(ConstantInt::get(IdxTy, 0),
8332                            EPI.MainLoopIterationCountCheck);
8333 
8334   // Generate the induction variable.
8335   OldInduction = Legal->getPrimaryInduction();
8336   Value *CountRoundDown = getOrCreateVectorTripCount(Lp);
8337   Constant *Step = ConstantInt::get(IdxTy, VF.getKnownMinValue() * UF);
8338   Value *StartIdx = EPResumeVal;
8339   Induction =
8340       createInductionVariable(Lp, StartIdx, CountRoundDown, Step,
8341                               getDebugLocFromInstOrOperands(OldInduction));
8342 
8343   // Generate induction resume values. These variables save the new starting
8344   // indexes for the scalar loop. They are used to test if there are any tail
8345   // iterations left once the vector loop has completed.
8346   // Note that when the vectorized epilogue is skipped due to iteration count
8347   // check, then the resume value for the induction variable comes from
8348   // the trip count of the main vector loop, hence passing the AdditionalBypass
8349   // argument.
8350   createInductionResumeValues(Lp, CountRoundDown,
8351                               {VecEpilogueIterationCountCheck,
8352                                EPI.VectorTripCount} /* AdditionalBypass */);
8353 
8354   AddRuntimeUnrollDisableMetaData(Lp);
8355   return completeLoopSkeleton(Lp, OrigLoopID);
8356 }
8357 
8358 BasicBlock *
8359 EpilogueVectorizerEpilogueLoop::emitMinimumVectorEpilogueIterCountCheck(
8360     Loop *L, BasicBlock *Bypass, BasicBlock *Insert) {
8361 
8362   assert(EPI.TripCount &&
8363          "Expected trip count to have been safed in the first pass.");
8364   assert(
8365       (!isa<Instruction>(EPI.TripCount) ||
8366        DT->dominates(cast<Instruction>(EPI.TripCount)->getParent(), Insert)) &&
8367       "saved trip count does not dominate insertion point.");
8368   Value *TC = EPI.TripCount;
8369   IRBuilder<> Builder(Insert->getTerminator());
8370   Value *Count = Builder.CreateSub(TC, EPI.VectorTripCount, "n.vec.remaining");
8371 
8372   // Generate code to check if the remaining iteration count is less than
8373   // VF * UF of the vector epilogue loop.
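       // When a scalar epilogue is required, at least one iteration must be left
       // for it after the epilogue vector loop, so branch to the scalar loop even
       // when the remaining count exactly equals VF * UF (hence ULE rather than
       // ULT).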
8374   auto P = Cost->requiresScalarEpilogue(EPI.EpilogueVF) ?
8375       ICmpInst::ICMP_ULE : ICmpInst::ICMP_ULT;
8376 
8377   Value *CheckMinIters =
8378       Builder.CreateICmp(P, Count,
8379                          createStepForVF(Builder, Count->getType(),
8380                                          EPI.EpilogueVF, EPI.EpilogueUF),
8381                          "min.epilog.iters.check");
8382 
8383   ReplaceInstWithInst(
8384       Insert->getTerminator(),
8385       BranchInst::Create(Bypass, LoopVectorPreHeader, CheckMinIters));
8386 
8387   LoopBypassBlocks.push_back(Insert);
8388   return Insert;
8389 }
8390 
8391 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtStart() {
8392   LLVM_DEBUG({
8393     dbgs() << "Create Skeleton for epilogue vectorized loop (second pass)\n"
8394            << "Epilogue Loop VF:" << EPI.EpilogueVF
8395            << ", Epilogue Loop UF:" << EPI.EpilogueUF << "\n";
8396   });
8397 }
8398 
8399 void EpilogueVectorizerEpilogueLoop::printDebugTracesAtEnd() {
8400   DEBUG_WITH_TYPE(VerboseDebug, {
8401     dbgs() << "final fn:\n" << *OrigLoop->getHeader()->getParent() << "\n";
8402   });
8403 }
8404 
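     // Evaluate the predicate on powers-of-two VFs starting at Range.Start and
     // clamp Range.End to the first VF whose result differs from the result at
     // Range.Start, so every VF remaining in the range shares the same decision.
     // The decision at Range.Start is returned.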
8405 bool LoopVectorizationPlanner::getDecisionAndClampRange(
8406     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
8407   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
8408   bool PredicateAtRangeStart = Predicate(Range.Start);
8409 
8410   for (ElementCount TmpVF = Range.Start * 2;
8411        ElementCount::isKnownLT(TmpVF, Range.End); TmpVF *= 2)
8412     if (Predicate(TmpVF) != PredicateAtRangeStart) {
8413       Range.End = TmpVF;
8414       break;
8415     }
8416 
8417   return PredicateAtRangeStart;
8418 }
8419 
8420 /// Build VPlans for the full range of feasible VF's = {\p MinVF, 2 * \p MinVF,
8421 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
8422 /// of VF's starting at a given VF and extending it as much as possible. Each
8423 /// vectorization decision can potentially shorten this sub-range during
8424 /// buildVPlan().
8425 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
8426                                            ElementCount MaxVF) {
8427   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
8428   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
8429     VFRange SubRange = {VF, MaxVFPlusOne};
8430     VPlans.push_back(buildVPlan(SubRange));
8431     VF = SubRange.End;
8432   }
8433 }
8434 
8435 VPValue *VPRecipeBuilder::createEdgeMask(BasicBlock *Src, BasicBlock *Dst,
8436                                          VPlanPtr &Plan) {
8437   assert(is_contained(predecessors(Dst), Src) && "Invalid edge");
8438 
8439   // Look for cached value.
8440   std::pair<BasicBlock *, BasicBlock *> Edge(Src, Dst);
8441   EdgeMaskCacheTy::iterator ECEntryIt = EdgeMaskCache.find(Edge);
8442   if (ECEntryIt != EdgeMaskCache.end())
8443     return ECEntryIt->second;
8444 
8445   VPValue *SrcMask = createBlockInMask(Src, Plan);
8446 
8447   // The terminator has to be a branch inst!
8448   BranchInst *BI = dyn_cast<BranchInst>(Src->getTerminator());
8449   assert(BI && "Unexpected terminator found");
8450 
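       // An unconditional branch (or one whose successors coincide) always takes
       // this edge whenever the source block executes, so the edge mask is just
       // the source block's mask.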
8451   if (!BI->isConditional() || BI->getSuccessor(0) == BI->getSuccessor(1))
8452     return EdgeMaskCache[Edge] = SrcMask;
8453 
8454   // If source is an exiting block, we know the exit edge is dynamically dead
8455   // in the vector loop, and thus we don't need to restrict the mask.  Avoid
8456   // adding uses of an otherwise potentially dead instruction.
8457   if (OrigLoop->isLoopExiting(Src))
8458     return EdgeMaskCache[Edge] = SrcMask;
8459 
8460   VPValue *EdgeMask = Plan->getOrAddVPValue(BI->getCondition());
8461   assert(EdgeMask && "No Edge Mask found for condition");
8462 
8463   if (BI->getSuccessor(0) != Dst)
8464     EdgeMask = Builder.createNot(EdgeMask);
8465 
8466   if (SrcMask) { // Otherwise block in-mask is all-one, no need to AND.
8467     // The condition is 'SrcMask && EdgeMask', which is equivalent to
8468     // 'select i1 SrcMask, i1 EdgeMask, i1 false'.
8469     // The select version does not introduce new UB if SrcMask is false and
8470     // EdgeMask is poison. Using 'and' here introduces undefined behavior.
8471     VPValue *False = Plan->getOrAddVPValue(
8472         ConstantInt::getFalse(BI->getCondition()->getType()));
8473     EdgeMask = Builder.createSelect(SrcMask, EdgeMask, False);
8474   }
8475 
8476   return EdgeMaskCache[Edge] = EdgeMask;
8477 }
8478 
8479 VPValue *VPRecipeBuilder::createBlockInMask(BasicBlock *BB, VPlanPtr &Plan) {
8480   assert(OrigLoop->contains(BB) && "Block is not a part of a loop");
8481 
8482   // Look for cached value.
8483   BlockMaskCacheTy::iterator BCEntryIt = BlockMaskCache.find(BB);
8484   if (BCEntryIt != BlockMaskCache.end())
8485     return BCEntryIt->second;
8486 
8487   // All-one mask is modelled as no-mask following the convention for masked
8488   // load/store/gather/scatter. Initialize BlockMask to no-mask.
8489   VPValue *BlockMask = nullptr;
8490 
8491   if (OrigLoop->getHeader() == BB) {
8492     if (!CM.blockNeedsPredicationForAnyReason(BB))
8493       return BlockMaskCache[BB] = BlockMask; // Loop incoming mask is all-one.
8494 
8495     // Create the block in mask as the first non-phi instruction in the block.
8496     VPBuilder::InsertPointGuard Guard(Builder);
8497     auto NewInsertionPoint = Builder.getInsertBlock()->getFirstNonPhi();
8498     Builder.setInsertPoint(Builder.getInsertBlock(), NewInsertionPoint);
8499 
8500     // Introduce the early-exit compare IV <= BTC to form header block mask.
8501     // This is used instead of IV < TC because TC may wrap, unlike BTC.
8502     // Start by constructing the desired canonical IV.
8503     VPValue *IV = nullptr;
8504     if (Legal->getPrimaryInduction())
8505       IV = Plan->getOrAddVPValue(Legal->getPrimaryInduction());
8506     else {
8507       auto *IVRecipe = new VPWidenCanonicalIVRecipe();
8508       Builder.getInsertBlock()->insert(IVRecipe, NewInsertionPoint);
8509       IV = IVRecipe;
8510     }
8511     VPValue *BTC = Plan->getOrCreateBackedgeTakenCount();
8512     bool TailFolded = !CM.isScalarEpilogueAllowed();
8513 
8514     if (TailFolded && CM.TTI.emitGetActiveLaneMask()) {
8515       // While ActiveLaneMask is a binary op that consumes the loop tripcount
8516       // as a second argument, we only pass the IV here and extract the
8517       // tripcount from the transform state where codegen of the VP instructions
8518       // happen.
8519       BlockMask = Builder.createNaryOp(VPInstruction::ActiveLaneMask, {IV});
8520     } else {
8521       BlockMask = Builder.createNaryOp(VPInstruction::ICmpULE, {IV, BTC});
8522     }
8523     return BlockMaskCache[BB] = BlockMask;
8524   }
8525 
8526   // This is the block mask. We OR all incoming edges.
8527   for (auto *Predecessor : predecessors(BB)) {
8528     VPValue *EdgeMask = createEdgeMask(Predecessor, BB, Plan);
8529     if (!EdgeMask) // Mask of predecessor is all-one so mask of block is too.
8530       return BlockMaskCache[BB] = EdgeMask;
8531 
8532     if (!BlockMask) { // BlockMask has its initialized nullptr value.
8533       BlockMask = EdgeMask;
8534       continue;
8535     }
8536 
8537     BlockMask = Builder.createOr(BlockMask, EdgeMask);
8538   }
8539 
8540   return BlockMaskCache[BB] = BlockMask;
8541 }
8542 
8543 VPRecipeBase *VPRecipeBuilder::tryToWidenMemory(Instruction *I,
8544                                                 ArrayRef<VPValue *> Operands,
8545                                                 VFRange &Range,
8546                                                 VPlanPtr &Plan) {
8547   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
8548          "Must be called with either a load or store");
8549 
8550   auto willWiden = [&](ElementCount VF) -> bool {
8551     if (VF.isScalar())
8552       return false;
8553     LoopVectorizationCostModel::InstWidening Decision =
8554         CM.getWideningDecision(I, VF);
8555     assert(Decision != LoopVectorizationCostModel::CM_Unknown &&
8556            "CM decision should be taken at this point.");
8557     if (Decision == LoopVectorizationCostModel::CM_Interleave)
8558       return true;
8559     if (CM.isScalarAfterVectorization(I, VF) ||
8560         CM.isProfitableToScalarize(I, VF))
8561       return false;
8562     return Decision != LoopVectorizationCostModel::CM_Scalarize;
8563   };
8564 
8565   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8566     return nullptr;
8567 
8568   VPValue *Mask = nullptr;
8569   if (Legal->isMaskRequired(I))
8570     Mask = createBlockInMask(I->getParent(), Plan);
8571 
8572   // Determine if the pointer operand of the access is either consecutive or
8573   // reverse consecutive.
8574   LoopVectorizationCostModel::InstWidening Decision =
8575       CM.getWideningDecision(I, Range.Start);
8576   bool Reverse = Decision == LoopVectorizationCostModel::CM_Widen_Reverse;
8577   bool Consecutive =
8578       Reverse || Decision == LoopVectorizationCostModel::CM_Widen;
8579 
8580   if (LoadInst *Load = dyn_cast<LoadInst>(I))
8581     return new VPWidenMemoryInstructionRecipe(*Load, Operands[0], Mask,
8582                                               Consecutive, Reverse);
8583 
8584   StoreInst *Store = cast<StoreInst>(I);
8585   return new VPWidenMemoryInstructionRecipe(*Store, Operands[1], Operands[0],
8586                                             Mask, Consecutive, Reverse);
8587 }
8588 
8589 VPWidenIntOrFpInductionRecipe *
8590 VPRecipeBuilder::tryToOptimizeInductionPHI(PHINode *Phi,
8591                                            ArrayRef<VPValue *> Operands) const {
8592   // Check if this is an integer or fp induction. If so, build the recipe that
8593   // produces its scalar and vector values.
8594   InductionDescriptor II = Legal->getInductionVars().lookup(Phi);
8595   if (II.getKind() == InductionDescriptor::IK_IntInduction ||
8596       II.getKind() == InductionDescriptor::IK_FpInduction) {
8597     assert(II.getStartValue() ==
8598            Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8599     const SmallVectorImpl<Instruction *> &Casts = II.getCastInsts();
8600     return new VPWidenIntOrFpInductionRecipe(
8601         Phi, Operands[0], Casts.empty() ? nullptr : Casts.front());
8602   }
8603 
8604   return nullptr;
8605 }
8606 
8607 VPWidenIntOrFpInductionRecipe *VPRecipeBuilder::tryToOptimizeInductionTruncate(
8608     TruncInst *I, ArrayRef<VPValue *> Operands, VFRange &Range,
8609     VPlan &Plan) const {
8610   // Optimize the special case where the source is a constant integer
8611   // induction variable. Notice that we can only optimize the 'trunc' case
8612   // because (a) FP conversions lose precision, (b) sext/zext may wrap, and
8613   // (c) other casts depend on pointer size.
8614 
8615   // Determine whether \p K is a truncation based on an induction variable that
8616   // can be optimized.
8617   auto isOptimizableIVTruncate =
8618       [&](Instruction *K) -> std::function<bool(ElementCount)> {
8619     return [=](ElementCount VF) -> bool {
8620       return CM.isOptimizableIVTruncate(K, VF);
8621     };
8622   };
8623 
8624   if (LoopVectorizationPlanner::getDecisionAndClampRange(
8625           isOptimizableIVTruncate(I), Range)) {
8626 
8627     InductionDescriptor II =
8628         Legal->getInductionVars().lookup(cast<PHINode>(I->getOperand(0)));
8629     VPValue *Start = Plan.getOrAddVPValue(II.getStartValue());
8630     return new VPWidenIntOrFpInductionRecipe(cast<PHINode>(I->getOperand(0)),
8631                                              Start, nullptr, I);
8632   }
8633   return nullptr;
8634 }
8635 
8636 VPRecipeOrVPValueTy VPRecipeBuilder::tryToBlend(PHINode *Phi,
8637                                                 ArrayRef<VPValue *> Operands,
8638                                                 VPlanPtr &Plan) {
8639   // If all incoming values are equal, the incoming VPValue can be used directly
8640   // instead of creating a new VPBlendRecipe.
8641   VPValue *FirstIncoming = Operands[0];
8642   if (all_of(Operands, [FirstIncoming](const VPValue *Inc) {
8643         return FirstIncoming == Inc;
8644       })) {
8645     return Operands[0];
8646   }
8647 
8648   // We know that all PHIs in non-header blocks are converted into selects, so
8649   // we don't have to worry about the insertion order and we can just use the
8650   // builder. At this point we generate the predication tree. There may be
8651   // duplications since this is a simple recursive scan, but future
8652   // optimizations will clean it up.
8653   SmallVector<VPValue *, 2> OperandsWithMask;
8654   unsigned NumIncoming = Phi->getNumIncomingValues();
8655 
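       // Build the blend operands as (incoming value, edge mask) pairs; the mask
       // is omitted for an edge whose mask is all-one (EdgeMask is nullptr).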
8656   for (unsigned In = 0; In < NumIncoming; In++) {
8657     VPValue *EdgeMask =
8658       createEdgeMask(Phi->getIncomingBlock(In), Phi->getParent(), Plan);
8659     assert((EdgeMask || NumIncoming == 1) &&
8660            "Multiple predecessors with one having a full mask");
8661     OperandsWithMask.push_back(Operands[In]);
8662     if (EdgeMask)
8663       OperandsWithMask.push_back(EdgeMask);
8664   }
8665   return toVPRecipeResult(new VPBlendRecipe(Phi, OperandsWithMask));
8666 }
8667 
8668 VPWidenCallRecipe *VPRecipeBuilder::tryToWidenCall(CallInst *CI,
8669                                                    ArrayRef<VPValue *> Operands,
8670                                                    VFRange &Range) const {
8671 
8672   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8673       [this, CI](ElementCount VF) { return CM.isScalarWithPredication(CI); },
8674       Range);
8675 
8676   if (IsPredicated)
8677     return nullptr;
8678 
8679   Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8680   if (ID && (ID == Intrinsic::assume || ID == Intrinsic::lifetime_end ||
8681              ID == Intrinsic::lifetime_start || ID == Intrinsic::sideeffect ||
8682              ID == Intrinsic::pseudoprobe ||
8683              ID == Intrinsic::experimental_noalias_scope_decl))
8684     return nullptr;
8685 
8686   auto willWiden = [&](ElementCount VF) -> bool {
8687     Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8688     // The following case may be scalarized depending on the VF.
8689     // The flag shows whether we use an intrinsic or a plain call for the
8690     // vectorized version of the instruction.
8691     // Is it beneficial to perform an intrinsic call compared to a lib call?
8692     bool NeedToScalarize = false;
8693     InstructionCost CallCost = CM.getVectorCallCost(CI, VF, NeedToScalarize);
8694     InstructionCost IntrinsicCost = ID ? CM.getVectorIntrinsicCost(CI, VF) : 0;
8695     bool UseVectorIntrinsic = ID && IntrinsicCost <= CallCost;
8696     return UseVectorIntrinsic || !NeedToScalarize;
8697   };
8698 
8699   if (!LoopVectorizationPlanner::getDecisionAndClampRange(willWiden, Range))
8700     return nullptr;
8701 
8702   ArrayRef<VPValue *> Ops = Operands.take_front(CI->arg_size());
8703   return new VPWidenCallRecipe(*CI, make_range(Ops.begin(), Ops.end()));
8704 }
8705 
8706 bool VPRecipeBuilder::shouldWiden(Instruction *I, VFRange &Range) const {
8707   assert(!isa<BranchInst>(I) && !isa<PHINode>(I) && !isa<LoadInst>(I) &&
8708          !isa<StoreInst>(I) && "Instruction should have been handled earlier");
8709   // Instruction should be widened, unless it is scalar after vectorization,
8710   // scalarization is profitable, or it is predicated.
8711   auto WillScalarize = [this, I](ElementCount VF) -> bool {
8712     return CM.isScalarAfterVectorization(I, VF) ||
8713            CM.isProfitableToScalarize(I, VF) || CM.isScalarWithPredication(I);
8714   };
8715   return !LoopVectorizationPlanner::getDecisionAndClampRange(WillScalarize,
8716                                                              Range);
8717 }
8718 
8719 VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
8720                                            ArrayRef<VPValue *> Operands) const {
8721   auto IsVectorizableOpcode = [](unsigned Opcode) {
8722     switch (Opcode) {
8723     case Instruction::Add:
8724     case Instruction::And:
8725     case Instruction::AShr:
8726     case Instruction::BitCast:
8727     case Instruction::FAdd:
8728     case Instruction::FCmp:
8729     case Instruction::FDiv:
8730     case Instruction::FMul:
8731     case Instruction::FNeg:
8732     case Instruction::FPExt:
8733     case Instruction::FPToSI:
8734     case Instruction::FPToUI:
8735     case Instruction::FPTrunc:
8736     case Instruction::FRem:
8737     case Instruction::FSub:
8738     case Instruction::ICmp:
8739     case Instruction::IntToPtr:
8740     case Instruction::LShr:
8741     case Instruction::Mul:
8742     case Instruction::Or:
8743     case Instruction::PtrToInt:
8744     case Instruction::SDiv:
8745     case Instruction::Select:
8746     case Instruction::SExt:
8747     case Instruction::Shl:
8748     case Instruction::SIToFP:
8749     case Instruction::SRem:
8750     case Instruction::Sub:
8751     case Instruction::Trunc:
8752     case Instruction::UDiv:
8753     case Instruction::UIToFP:
8754     case Instruction::URem:
8755     case Instruction::Xor:
8756     case Instruction::ZExt:
8757       return true;
8758     }
8759     return false;
8760   };
8761 
8762   if (!IsVectorizableOpcode(I->getOpcode()))
8763     return nullptr;
8764 
8765   // Success: widen this instruction.
8766   return new VPWidenRecipe(*I, make_range(Operands.begin(), Operands.end()));
8767 }
8768 
8769 void VPRecipeBuilder::fixHeaderPhis() {
8770   BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
8771   for (VPWidenPHIRecipe *R : PhisToFix) {
8772     auto *PN = cast<PHINode>(R->getUnderlyingValue());
8773     VPRecipeBase *IncR =
8774         getRecipe(cast<Instruction>(PN->getIncomingValueForBlock(OrigLatch)));
8775     R->addOperand(IncR->getVPSingleValue());
8776   }
8777 }
8778 
8779 VPBasicBlock *VPRecipeBuilder::handleReplication(
8780     Instruction *I, VFRange &Range, VPBasicBlock *VPBB,
8781     VPlanPtr &Plan) {
8782   bool IsUniform = LoopVectorizationPlanner::getDecisionAndClampRange(
8783       [&](ElementCount VF) { return CM.isUniformAfterVectorization(I, VF); },
8784       Range);
8785 
8786   bool IsPredicated = LoopVectorizationPlanner::getDecisionAndClampRange(
8787       [&](ElementCount VF) { return CM.isPredicatedInst(I, IsUniform); },
8788       Range);
8789 
8790   // Even if the instruction is not marked as uniform, there are certain
8791   // intrinsic calls that can be effectively treated as such, so we check for
8792   // them here. Conservatively, we only do this for scalable vectors, since
8793   // for fixed-width VFs we can always fall back on full scalarization.
8794   if (!IsUniform && Range.Start.isScalable() && isa<IntrinsicInst>(I)) {
8795     switch (cast<IntrinsicInst>(I)->getIntrinsicID()) {
8796     case Intrinsic::assume:
8797     case Intrinsic::lifetime_start:
8798     case Intrinsic::lifetime_end:
8799       // For scalable vectors, if one of the operands is variant then we still
8800       // want to mark it as uniform, which will generate one instruction for just
8801       // the first lane of the vector. We can't scalarize the call in the same
8802       // way as for fixed-width vectors because we don't know how many lanes
8803       // there are.
8804       //
8805       // The reasons for doing it this way for scalable vectors are:
8806       //   1. For the assume intrinsic, generating the instruction for the first
8807       //      lane is still better than not generating any at all. For
8808       //      example, the input may be a splat across all lanes.
8809       //   2. For the lifetime start/end intrinsics the pointer operand only
8810       //      does anything useful when the input comes from a stack object,
8811       //      which suggests it should always be uniform. For non-stack objects
8812       //      the effect is to poison the object, which still allows us to
8813       //      remove the call.
8814       IsUniform = true;
8815       break;
8816     default:
8817       break;
8818     }
8819   }
8820 
8821   auto *Recipe = new VPReplicateRecipe(I, Plan->mapToVPValues(I->operands()),
8822                                        IsUniform, IsPredicated);
8823   setRecipe(I, Recipe);
8824   Plan->addVPValue(I, Recipe);
8825 
8826   // Find if I uses a predicated instruction. If so, it will use its scalar
8827   // value. Avoid hoisting the insert-element which packs the scalar value into
8828   // a vector value, as that happens iff all users use the vector value.
8829   for (VPValue *Op : Recipe->operands()) {
8830     auto *PredR = dyn_cast_or_null<VPPredInstPHIRecipe>(Op->getDef());
8831     if (!PredR)
8832       continue;
8833     auto *RepR =
8834         cast_or_null<VPReplicateRecipe>(PredR->getOperand(0)->getDef());
8835     assert(RepR->isPredicated() &&
8836            "expected Replicate recipe to be predicated");
8837     RepR->setAlsoPack(false);
8838   }
8839 
8840   // Finalize the recipe for Instr, first if it is not predicated.
8841   if (!IsPredicated) {
8842     LLVM_DEBUG(dbgs() << "LV: Scalarizing:" << *I << "\n");
8843     VPBB->appendRecipe(Recipe);
8844     return VPBB;
8845   }
8846   LLVM_DEBUG(dbgs() << "LV: Scalarizing and predicating:" << *I << "\n");
8847   assert(VPBB->getSuccessors().empty() &&
8848          "VPBB has successors when handling predicated replication.");
8849   // Record predicated instructions for above packing optimizations.
8850   VPBlockBase *Region = createReplicateRegion(I, Recipe, Plan);
8851   VPBlockUtils::insertBlockAfter(Region, VPBB);
8852   auto *RegSucc = new VPBasicBlock();
8853   VPBlockUtils::insertBlockAfter(RegSucc, Region);
8854   return RegSucc;
8855 }
8856 
8857 VPRegionBlock *VPRecipeBuilder::createReplicateRegion(Instruction *Instr,
8858                                                       VPRecipeBase *PredRecipe,
8859                                                       VPlanPtr &Plan) {
8860   // Instructions marked for predication are replicated and placed under an
8861   // if-then construct to prevent side-effects.
8862 
8863   // Generate recipes to compute the block mask for this region.
8864   VPValue *BlockInMask = createBlockInMask(Instr->getParent(), Plan);
8865 
8866   // Build the triangular if-then region.
8867   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
8868   assert(Instr->getParent() && "Predicated instruction not in any basic block");
8869   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
8870   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
8871   auto *PHIRecipe = Instr->getType()->isVoidTy()
8872                         ? nullptr
8873                         : new VPPredInstPHIRecipe(Plan->getOrAddVPValue(Instr));
8874   if (PHIRecipe) {
8875     Plan->removeVPValueFor(Instr);
8876     Plan->addVPValue(Instr, PHIRecipe);
8877   }
8878   auto *Exit = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
8879   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", PredRecipe);
8880   VPRegionBlock *Region = new VPRegionBlock(Entry, Exit, RegionName, true);
8881 
8882   // Note: first set Entry as region entry and then connect successors starting
8883   // from it in order, to propagate the "parent" of each VPBasicBlock.
8884   VPBlockUtils::insertTwoBlocksAfter(Pred, Exit, BlockInMask, Entry);
8885   VPBlockUtils::connectBlocks(Pred, Exit);
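       // The region now forms a triangle: Entry branches on the mask either to the
       // predicated block (Pred) or directly to Exit, and Pred falls through to
       // Exit, where the optional phi recipe merges the predicated result.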
8886 
8887   return Region;
8888 }
8889 
8890 VPRecipeOrVPValueTy
8891 VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
8892                                         ArrayRef<VPValue *> Operands,
8893                                         VFRange &Range, VPlanPtr &Plan) {
8894   // First, check for specific widening recipes that deal with calls, memory
8895   // operations, inductions and Phi nodes.
8896   if (auto *CI = dyn_cast<CallInst>(Instr))
8897     return toVPRecipeResult(tryToWidenCall(CI, Operands, Range));
8898 
8899   if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
8900     return toVPRecipeResult(tryToWidenMemory(Instr, Operands, Range, Plan));
8901 
8902   VPRecipeBase *Recipe;
8903   if (auto Phi = dyn_cast<PHINode>(Instr)) {
8904     if (Phi->getParent() != OrigLoop->getHeader())
8905       return tryToBlend(Phi, Operands, Plan);
8906     if ((Recipe = tryToOptimizeInductionPHI(Phi, Operands)))
8907       return toVPRecipeResult(Recipe);
8908 
8909     VPWidenPHIRecipe *PhiRecipe = nullptr;
8910     if (Legal->isReductionVariable(Phi) || Legal->isFirstOrderRecurrence(Phi)) {
8911       VPValue *StartV = Operands[0];
8912       if (Legal->isReductionVariable(Phi)) {
8913         RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
8914         assert(RdxDesc.getRecurrenceStartValue() ==
8915                Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader()));
8916         PhiRecipe = new VPReductionPHIRecipe(Phi, RdxDesc, *StartV,
8917                                              CM.isInLoopReduction(Phi),
8918                                              CM.useOrderedReductions(RdxDesc));
8919       } else {
8920         PhiRecipe = new VPFirstOrderRecurrencePHIRecipe(Phi, *StartV);
8921       }
8922 
8923       // Record the incoming value from the backedge, so we can add it to the
8924       // phi recipe after all recipes have been created.
8925       recordRecipeOf(cast<Instruction>(
8926           Phi->getIncomingValueForBlock(OrigLoop->getLoopLatch())));
8927       PhisToFix.push_back(PhiRecipe);
8928     } else {
8929       // TODO: record start and backedge value for remaining pointer induction
8930       // phis.
8931       assert(Phi->getType()->isPointerTy() &&
8932              "only pointer phis should be handled here");
8933       PhiRecipe = new VPWidenPHIRecipe(Phi);
8934     }
8935 
8936     return toVPRecipeResult(PhiRecipe);
8937   }
8938 
8939   if (isa<TruncInst>(Instr) &&
8940       (Recipe = tryToOptimizeInductionTruncate(cast<TruncInst>(Instr), Operands,
8941                                                Range, *Plan)))
8942     return toVPRecipeResult(Recipe);
8943 
8944   if (!shouldWiden(Instr, Range))
8945     return nullptr;
8946 
8947   if (auto GEP = dyn_cast<GetElementPtrInst>(Instr))
8948     return toVPRecipeResult(new VPWidenGEPRecipe(
8949         GEP, make_range(Operands.begin(), Operands.end()), OrigLoop));
8950 
8951   if (auto *SI = dyn_cast<SelectInst>(Instr)) {
8952     bool InvariantCond =
8953         PSE.getSE()->isLoopInvariant(PSE.getSCEV(SI->getOperand(0)), OrigLoop);
8954     return toVPRecipeResult(new VPWidenSelectRecipe(
8955         *SI, make_range(Operands.begin(), Operands.end()), InvariantCond));
8956   }
8957 
8958   return toVPRecipeResult(tryToWiden(Instr, Operands));
8959 }
8960 
8961 void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF,
8962                                                         ElementCount MaxVF) {
8963   assert(OrigLoop->isInnermost() && "Inner loop expected.");
8964 
8965   // Collect instructions from the original loop that will become trivially dead
8966   // in the vectorized loop. We don't need to vectorize these instructions. For
8967   // example, original induction update instructions can become dead because we
8968   // separately emit induction "steps" when generating code for the new loop.
8969   // Similarly, we create a new latch condition when setting up the structure
8970   // of the new loop, so the old one can become dead.
8971   SmallPtrSet<Instruction *, 4> DeadInstructions;
8972   collectTriviallyDeadInstructions(DeadInstructions);
8973 
8974   // Add assume instructions we need to drop to DeadInstructions, to prevent
8975   // them from being added to the VPlan.
8976   // TODO: We only need to drop assumes in blocks that get flattend. If the
8977   // control flow is preserved, we should keep them.
8978   auto &ConditionalAssumes = Legal->getConditionalAssumes();
8979   DeadInstructions.insert(ConditionalAssumes.begin(), ConditionalAssumes.end());
8980 
8981   MapVector<Instruction *, Instruction *> &SinkAfter = Legal->getSinkAfter();
8982   // Dead instructions do not need sinking. Remove them from SinkAfter.
8983   for (Instruction *I : DeadInstructions)
8984     SinkAfter.erase(I);
8985 
8986   // Cannot sink instructions after dead instructions (there won't be any
8987   // recipes for them). Instead, find the first non-dead previous instruction.
8988   for (auto &P : Legal->getSinkAfter()) {
8989     Instruction *SinkTarget = P.second;
8990     Instruction *FirstInst = &*SinkTarget->getParent()->begin();
8991     (void)FirstInst;
8992     while (DeadInstructions.contains(SinkTarget)) {
8993       assert(
8994           SinkTarget != FirstInst &&
8995           "Must find a live instruction (at least the one feeding the "
8996           "first-order recurrence PHI) before reaching beginning of the block");
8997       SinkTarget = SinkTarget->getPrevNode();
8998       assert(SinkTarget != P.first &&
8999              "sink source equals target, no sinking required");
9000     }
9001     P.second = SinkTarget;
9002   }
9003 
9004   auto MaxVFPlusOne = MaxVF.getWithIncrement(1);
9005   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFPlusOne);) {
9006     VFRange SubRange = {VF, MaxVFPlusOne};
9007     VPlans.push_back(
9008         buildVPlanWithVPRecipes(SubRange, DeadInstructions, SinkAfter));
9009     VF = SubRange.End;
9010   }
9011 }
9012 
9013 VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
9014     VFRange &Range, SmallPtrSetImpl<Instruction *> &DeadInstructions,
9015     const MapVector<Instruction *, Instruction *> &SinkAfter) {
9016 
9017   SmallPtrSet<const InterleaveGroup<Instruction> *, 1> InterleaveGroups;
9018 
9019   VPRecipeBuilder RecipeBuilder(OrigLoop, TLI, Legal, CM, PSE, Builder);
9020 
9021   // ---------------------------------------------------------------------------
9022   // Pre-construction: record ingredients whose recipes we'll need to further
9023   // process after constructing the initial VPlan.
9024   // ---------------------------------------------------------------------------
9025 
9026   // Mark instructions we'll need to sink later and their targets as
9027   // ingredients whose recipe we'll need to record.
9028   for (auto &Entry : SinkAfter) {
9029     RecipeBuilder.recordRecipeOf(Entry.first);
9030     RecipeBuilder.recordRecipeOf(Entry.second);
9031   }
9032   for (auto &Reduction : CM.getInLoopReductionChains()) {
9033     PHINode *Phi = Reduction.first;
9034     RecurKind Kind = Legal->getReductionVars()[Phi].getRecurrenceKind();
9035     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9036 
9037     RecipeBuilder.recordRecipeOf(Phi);
9038     for (auto &R : ReductionOperations) {
9039       RecipeBuilder.recordRecipeOf(R);
9040       // For min/max reductions, where we have a pair of icmp/select, we also
9041       // need to record the ICmp recipe, so it can be removed later.
9042       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9043              "Only min/max recurrences allowed for inloop reductions");
9044       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
9045         RecipeBuilder.recordRecipeOf(cast<Instruction>(R->getOperand(0)));
9046     }
9047   }
9048 
9049   // For each interleave group which is relevant for this (possibly trimmed)
9050   // Range, add it to the set of groups to be later applied to the VPlan and add
9051   // placeholders for its members' Recipes which we'll be replacing with a
9052   // single VPInterleaveRecipe.
9053   for (InterleaveGroup<Instruction> *IG : IAI.getInterleaveGroups()) {
9054     auto applyIG = [IG, this](ElementCount VF) -> bool {
9055       return (VF.isVector() && // Query is illegal for VF == 1
9056               CM.getWideningDecision(IG->getInsertPos(), VF) ==
9057                   LoopVectorizationCostModel::CM_Interleave);
9058     };
9059     if (!getDecisionAndClampRange(applyIG, Range))
9060       continue;
9061     InterleaveGroups.insert(IG);
9062     for (unsigned i = 0; i < IG->getFactor(); i++)
9063       if (Instruction *Member = IG->getMember(i))
9064         RecipeBuilder.recordRecipeOf(Member);
9065   }
9066 
9067   // ---------------------------------------------------------------------------
9068   // Build initial VPlan: Scan the body of the loop in a topological order to
9069   // visit each basic block after having visited its predecessor basic blocks.
9070   // ---------------------------------------------------------------------------
9071 
9072   auto Plan = std::make_unique<VPlan>();
9073 
9074   // Scan the body of the loop in a topological order to visit each basic block
9075   // after having visited its predecessor basic blocks.
9076   LoopBlocksDFS DFS(OrigLoop);
9077   DFS.perform(LI);
9078 
9079   VPBasicBlock *VPBB = nullptr;
9080   VPBasicBlock *HeaderVPBB = nullptr;
9081   SmallVector<VPWidenIntOrFpInductionRecipe *> InductionsToMove;
9082   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO())) {
9083     // Relevant instructions from basic block BB will be grouped into VPRecipe
9084     // ingredients and fill a new VPBasicBlock.
9085     unsigned VPBBsForBB = 0;
9086     auto *FirstVPBBForBB = new VPBasicBlock(BB->getName());
9087     if (VPBB)
9088       VPBlockUtils::insertBlockAfter(FirstVPBBForBB, VPBB);
9089     else {
9090       auto *TopRegion = new VPRegionBlock("vector loop");
9091       TopRegion->setEntry(FirstVPBBForBB);
9092       Plan->setEntry(TopRegion);
9093       HeaderVPBB = FirstVPBBForBB;
9094     }
9095     VPBB = FirstVPBBForBB;
9096     Builder.setInsertPoint(VPBB);
9097 
9098     // Introduce each ingredient into VPlan.
9099     // TODO: Model and preserve debug instrinsics in VPlan.
9100     for (Instruction &I : BB->instructionsWithoutDebug()) {
9101       Instruction *Instr = &I;
9102 
9103       // First filter out irrelevant instructions, to ensure no recipes are
9104       // built for them.
9105       if (isa<BranchInst>(Instr) || DeadInstructions.count(Instr))
9106         continue;
9107 
9108       SmallVector<VPValue *, 4> Operands;
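           // For phis in the loop header, only the start value (the incoming value
           // from the preheader) is collected as an operand here; recurrence phis
           // get their backedge operand later in fixHeaderPhis().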
9109       auto *Phi = dyn_cast<PHINode>(Instr);
9110       if (Phi && Phi->getParent() == OrigLoop->getHeader()) {
9111         Operands.push_back(Plan->getOrAddVPValue(
9112             Phi->getIncomingValueForBlock(OrigLoop->getLoopPreheader())));
9113       } else {
9114         auto OpRange = Plan->mapToVPValues(Instr->operands());
9115         Operands = {OpRange.begin(), OpRange.end()};
9116       }
9117       if (auto RecipeOrValue = RecipeBuilder.tryToCreateWidenRecipe(
9118               Instr, Operands, Range, Plan)) {
9119         // If Instr can be simplified to an existing VPValue, use it.
9120         if (RecipeOrValue.is<VPValue *>()) {
9121           auto *VPV = RecipeOrValue.get<VPValue *>();
9122           Plan->addVPValue(Instr, VPV);
9123           // If the re-used value is a recipe, register the recipe for the
9124           // instruction, in case the recipe for Instr needs to be recorded.
9125           if (auto *R = dyn_cast_or_null<VPRecipeBase>(VPV->getDef()))
9126             RecipeBuilder.setRecipe(Instr, R);
9127           continue;
9128         }
9129         // Otherwise, add the new recipe.
9130         VPRecipeBase *Recipe = RecipeOrValue.get<VPRecipeBase *>();
9131         for (auto *Def : Recipe->definedValues()) {
9132           auto *UV = Def->getUnderlyingValue();
9133           Plan->addVPValue(UV, Def);
9134         }
9135 
9136         if (isa<VPWidenIntOrFpInductionRecipe>(Recipe) &&
9137             HeaderVPBB->getFirstNonPhi() != VPBB->end()) {
9138           // Keep track of VPWidenIntOrFpInductionRecipes not in the phi section
9139           // of the header block. That can happen for truncates of induction
9140           // variables. Those recipes are moved to the phi section of the header
9141           // block after applying SinkAfter, which relies on the original
9142           // position of the trunc.
9143           assert(isa<TruncInst>(Instr));
9144           InductionsToMove.push_back(
9145               cast<VPWidenIntOrFpInductionRecipe>(Recipe));
9146         }
9147         RecipeBuilder.setRecipe(Instr, Recipe);
9148         VPBB->appendRecipe(Recipe);
9149         continue;
9150       }
9151 
9152       // Otherwise, if all widening options failed, Instruction is to be
9153       // replicated. This may create a successor for VPBB.
9154       VPBasicBlock *NextVPBB =
9155           RecipeBuilder.handleReplication(Instr, Range, VPBB, Plan);
9156       if (NextVPBB != VPBB) {
9157         VPBB = NextVPBB;
9158         VPBB->setName(BB->hasName() ? BB->getName() + "." + Twine(VPBBsForBB++)
9159                                     : "");
9160       }
9161     }
9162   }
9163 
9164   assert(isa<VPRegionBlock>(Plan->getEntry()) &&
9165          !Plan->getEntry()->getEntryBasicBlock()->empty() &&
9166          "entry block must be set to a VPRegionBlock having a non-empty entry "
9167          "VPBasicBlock");
9168   cast<VPRegionBlock>(Plan->getEntry())->setExit(VPBB);
9169   RecipeBuilder.fixHeaderPhis();
9170 
9171   // ---------------------------------------------------------------------------
9172   // Transform initial VPlan: Apply previously taken decisions, in order, to
9173   // bring the VPlan to its final state.
9174   // ---------------------------------------------------------------------------
9175 
9176   // Apply Sink-After legal constraints.
9177   auto GetReplicateRegion = [](VPRecipeBase *R) -> VPRegionBlock * {
9178     auto *Region = dyn_cast_or_null<VPRegionBlock>(R->getParent()->getParent());
9179     if (Region && Region->isReplicator()) {
9180       assert(Region->getNumSuccessors() == 1 &&
9181              Region->getNumPredecessors() == 1 && "Expected SESE region!");
9182       assert(R->getParent()->size() == 1 &&
9183              "A recipe in an original replicator region must be the only "
9184              "recipe in its block");
9185       return Region;
9186     }
9187     return nullptr;
9188   };
9189   for (auto &Entry : SinkAfter) {
9190     VPRecipeBase *Sink = RecipeBuilder.getRecipe(Entry.first);
9191     VPRecipeBase *Target = RecipeBuilder.getRecipe(Entry.second);
9192 
9193     auto *TargetRegion = GetReplicateRegion(Target);
9194     auto *SinkRegion = GetReplicateRegion(Sink);
9195     if (!SinkRegion) {
9196       // If the sink source is not a replicate region, sink the recipe directly.
9197       if (TargetRegion) {
9198         // The target is in a replication region, make sure to move Sink to
9199         // the block after it, not into the replication region itself.
9200         VPBasicBlock *NextBlock =
9201             cast<VPBasicBlock>(TargetRegion->getSuccessors().front());
9202         Sink->moveBefore(*NextBlock, NextBlock->getFirstNonPhi());
9203       } else
9204         Sink->moveAfter(Target);
9205       continue;
9206     }
9207 
9208     // The sink source is in a replicate region. Unhook the region from the CFG.
9209     auto *SinkPred = SinkRegion->getSinglePredecessor();
9210     auto *SinkSucc = SinkRegion->getSingleSuccessor();
9211     VPBlockUtils::disconnectBlocks(SinkPred, SinkRegion);
9212     VPBlockUtils::disconnectBlocks(SinkRegion, SinkSucc);
9213     VPBlockUtils::connectBlocks(SinkPred, SinkSucc);
9214 
9215     if (TargetRegion) {
9216       // The target recipe is also in a replicate region, move the sink region
9217       // after the target region.
9218       auto *TargetSucc = TargetRegion->getSingleSuccessor();
9219       VPBlockUtils::disconnectBlocks(TargetRegion, TargetSucc);
9220       VPBlockUtils::connectBlocks(TargetRegion, SinkRegion);
9221       VPBlockUtils::connectBlocks(SinkRegion, TargetSucc);
9222     } else {
9223       // The sink source is in a replicate region, so we need to move the whole
9224       // replicate region, which should only contain a single recipe in the
9225       // main block.
9226       auto *SplitBlock =
9227           Target->getParent()->splitAt(std::next(Target->getIterator()));
9228 
9229       auto *SplitPred = SplitBlock->getSinglePredecessor();
9230 
9231       VPBlockUtils::disconnectBlocks(SplitPred, SplitBlock);
9232       VPBlockUtils::connectBlocks(SplitPred, SinkRegion);
9233       VPBlockUtils::connectBlocks(SinkRegion, SplitBlock);
9234       if (VPBB == SplitPred)
9235         VPBB = SplitBlock;
9236     }
9237   }
9238 
9239   // Now that sink-after is done, move induction recipes for optimized truncates
9240   // to the phi section of the header block.
9241   for (VPWidenIntOrFpInductionRecipe *Ind : InductionsToMove)
9242     Ind->moveBefore(*HeaderVPBB, HeaderVPBB->getFirstNonPhi());
9243 
9244   // Adjust the recipes for any inloop reductions.
9245   adjustRecipesForReductions(VPBB, Plan, RecipeBuilder, Range.Start);
9246 
9247   // Introduce a recipe to combine the incoming and previous values of a
9248   // first-order recurrence.
9249   for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9250     auto *RecurPhi = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R);
9251     if (!RecurPhi)
9252       continue;
9253 
9254     VPRecipeBase *PrevRecipe = RecurPhi->getBackedgeRecipe();
9255     VPBasicBlock *InsertBlock = PrevRecipe->getParent();
9256     auto *Region = GetReplicateRegion(PrevRecipe);
9257     if (Region)
9258       InsertBlock = cast<VPBasicBlock>(Region->getSingleSuccessor());
9259     if (Region || PrevRecipe->isPhi())
9260       Builder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
9261     else
9262       Builder.setInsertPoint(InsertBlock, std::next(PrevRecipe->getIterator()));
9263 
9264     auto *RecurSplice = cast<VPInstruction>(
9265         Builder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
9266                              {RecurPhi, RecurPhi->getBackedgeValue()}));
9267 
9268     RecurPhi->replaceAllUsesWith(RecurSplice);
9269     // Set the first operand of RecurSplice to RecurPhi again, after replacing
9270     // all users.
9271     RecurSplice->setOperand(0, RecurPhi);
9272   }
9273 
9274   // Interleave memory: for each Interleave Group we marked earlier as relevant
9275   // for this VPlan, replace the Recipes widening its memory instructions with a
9276   // single VPInterleaveRecipe at its insertion point.
9277   for (auto IG : InterleaveGroups) {
9278     auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
9279         RecipeBuilder.getRecipe(IG->getInsertPos()));
9280     SmallVector<VPValue *, 4> StoredValues;
9281     for (unsigned i = 0; i < IG->getFactor(); ++i)
9282       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
9283         auto *StoreR =
9284             cast<VPWidenMemoryInstructionRecipe>(RecipeBuilder.getRecipe(SI));
9285         StoredValues.push_back(StoreR->getStoredValue());
9286       }
9287 
9288     auto *VPIG = new VPInterleaveRecipe(IG, Recipe->getAddr(), StoredValues,
9289                                         Recipe->getMask());
9290     VPIG->insertBefore(Recipe);
9291     unsigned J = 0;
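         // J indexes the values defined by the new interleave recipe; only members
         // that produce a result (loads) consume a slot.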
9292     for (unsigned i = 0; i < IG->getFactor(); ++i)
9293       if (Instruction *Member = IG->getMember(i)) {
9294         if (!Member->getType()->isVoidTy()) {
9295           VPValue *OriginalV = Plan->getVPValue(Member);
9296           Plan->removeVPValueFor(Member);
9297           Plan->addVPValue(Member, VPIG->getVPValue(J));
9298           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
9299           J++;
9300         }
9301         RecipeBuilder.getRecipe(Member)->eraseFromParent();
9302       }
9303   }
9304 
9305   // From this point onwards, VPlan-to-VPlan transformations may change the plan
9306   // in ways that make accessing values using original IR values incorrect.
9307   Plan->disableValue2VPValue();
9308 
9309   VPlanTransforms::sinkScalarOperands(*Plan);
9310   VPlanTransforms::mergeReplicateRegions(*Plan);
9311 
9312   std::string PlanName;
9313   raw_string_ostream RSO(PlanName);
9314   ElementCount VF = Range.Start;
9315   Plan->addVF(VF);
9316   RSO << "Initial VPlan for VF={" << VF;
9317   for (VF *= 2; ElementCount::isKnownLT(VF, Range.End); VF *= 2) {
9318     Plan->addVF(VF);
9319     RSO << "," << VF;
9320   }
9321   RSO << "},UF>=1";
9322   RSO.flush();
9323   Plan->setName(PlanName);
9324 
9325   assert(VPlanVerifier::verifyPlanIsValid(*Plan) && "VPlan is invalid");
9326   return Plan;
9327 }
9328 
9329 VPlanPtr LoopVectorizationPlanner::buildVPlan(VFRange &Range) {
9330   // Outer loop handling: They may require CFG and instruction level
9331   // transformations before even evaluating whether vectorization is profitable.
9332   // Since we cannot modify the incoming IR, we need to build VPlan upfront in
9333   // the vectorization pipeline.
9334   assert(!OrigLoop->isInnermost());
9335   assert(EnableVPlanNativePath && "VPlan-native path is not enabled.");
9336 
9337   // Create new empty VPlan
9338   auto Plan = std::make_unique<VPlan>();
9339 
9340   // Build hierarchical CFG
9341   VPlanHCFGBuilder HCFGBuilder(OrigLoop, LI, *Plan);
9342   HCFGBuilder.buildHierarchicalCFG();
9343 
9344   for (ElementCount VF = Range.Start; ElementCount::isKnownLT(VF, Range.End);
9345        VF *= 2)
9346     Plan->addVF(VF);
9347 
9348   if (EnableVPlanPredication) {
9349     VPlanPredicator VPP(*Plan);
9350     VPP.predicate();
9351 
9352     // Avoid running transformation to recipes until masked code generation in
9353     // VPlan-native path is in place.
9354     return Plan;
9355   }
9356 
9357   SmallPtrSet<Instruction *, 1> DeadInstructions;
9358   VPlanTransforms::VPInstructionsToVPRecipes(OrigLoop, Plan,
9359                                              Legal->getInductionVars(),
9360                                              DeadInstructions, *PSE.getSE());
9361   return Plan;
9362 }
9363 
9364 // Adjust the recipes for reductions. For in-loop reductions the chain of
9365 // instructions leading from the loop exit instr to the phi needs to be converted
9366 // to reductions, with one operand being vector and the other being the scalar
9367 // reduction chain. For other reductions, a select is introduced between the phi
9368 // and live-out recipes when folding the tail.
9369 void LoopVectorizationPlanner::adjustRecipesForReductions(
9370     VPBasicBlock *LatchVPBB, VPlanPtr &Plan, VPRecipeBuilder &RecipeBuilder,
9371     ElementCount MinVF) {
9372   for (auto &Reduction : CM.getInLoopReductionChains()) {
9373     PHINode *Phi = Reduction.first;
9374     RecurrenceDescriptor &RdxDesc = Legal->getReductionVars()[Phi];
9375     const SmallVector<Instruction *, 4> &ReductionOperations = Reduction.second;
9376 
9377     if (MinVF.isScalar() && !CM.useOrderedReductions(RdxDesc))
9378       continue;
9379 
9380     // ReductionOperations are ordered top-down from the phi's use to the
9381     // LoopExitValue. We keep track of the previous item (the Chain) to tell
9382     // which of the two operands will remain scalar and which will be reduced.
9383     // For minmax the chain will be the select instructions.
9384     Instruction *Chain = Phi;
9385     for (Instruction *R : ReductionOperations) {
9386       VPRecipeBase *WidenRecipe = RecipeBuilder.getRecipe(R);
9387       RecurKind Kind = RdxDesc.getRecurrenceKind();
9388 
9389       VPValue *ChainOp = Plan->getVPValue(Chain);
9390       unsigned FirstOpId;
9391       assert(!RecurrenceDescriptor::isSelectCmpRecurrenceKind(Kind) &&
9392              "Only min/max recurrences allowed for inloop reductions");
9393       // Recognize a call to the llvm.fmuladd intrinsic.
9394       bool IsFMulAdd = (Kind == RecurKind::FMulAdd);
9395       assert((!IsFMulAdd || RecurrenceDescriptor::isFMulAddIntrinsic(R)) &&
9396              "Expected instruction to be a call to the llvm.fmuladd intrinsic");
9397       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9398         assert(isa<VPWidenSelectRecipe>(WidenRecipe) &&
9399                "Expected to replace a VPWidenSelectSC");
9400         FirstOpId = 1;
9401       } else {
9402         assert((MinVF.isScalar() || isa<VPWidenRecipe>(WidenRecipe) ||
9403                 (IsFMulAdd && isa<VPWidenCallRecipe>(WidenRecipe))) &&
9404                "Expected to replace a VPWidenSC");
9405         FirstOpId = 0;
9406       }
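           // Whichever operand is not the scalar chain becomes the vector operand
           // that is reduced.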
9407       unsigned VecOpId =
9408           R->getOperand(FirstOpId) == Chain ? FirstOpId + 1 : FirstOpId;
9409       VPValue *VecOp = Plan->getVPValue(R->getOperand(VecOpId));
9410 
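           // When folding the tail by masking, predicate the reduction on the block
           // mask so that masked-off lanes do not contribute to the reduced value.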
9411       auto *CondOp = CM.foldTailByMasking()
9412                          ? RecipeBuilder.createBlockInMask(R->getParent(), Plan)
9413                          : nullptr;
9414 
9415       if (IsFMulAdd) {
9416         // If the instruction is a call to the llvm.fmuladd intrinsic then we
9417         // need to create an fmul recipe to use as the vector operand for the
9418         // fadd reduction.
9419         VPInstruction *FMulRecipe = new VPInstruction(
9420             Instruction::FMul, {VecOp, Plan->getVPValue(R->getOperand(1))});
9421         FMulRecipe->setFastMathFlags(R->getFastMathFlags());
9422         WidenRecipe->getParent()->insert(FMulRecipe,
9423                                          WidenRecipe->getIterator());
9424         VecOp = FMulRecipe;
9425       }
9426       VPReductionRecipe *RedRecipe =
9427           new VPReductionRecipe(&RdxDesc, R, ChainOp, VecOp, CondOp, TTI);
9428       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9429       Plan->removeVPValueFor(R);
9430       Plan->addVPValue(R, RedRecipe);
9431       WidenRecipe->getParent()->insert(RedRecipe, WidenRecipe->getIterator());
9432       WidenRecipe->getVPSingleValue()->replaceAllUsesWith(RedRecipe);
9433       WidenRecipe->eraseFromParent();
9434 
9435       if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9436         VPRecipeBase *CompareRecipe =
9437             RecipeBuilder.getRecipe(cast<Instruction>(R->getOperand(0)));
9438         assert(isa<VPWidenRecipe>(CompareRecipe) &&
9439                "Expected to replace a VPWidenSC");
9440         assert(cast<VPWidenRecipe>(CompareRecipe)->getNumUsers() == 0 &&
9441                "Expected no remaining users");
9442         CompareRecipe->eraseFromParent();
9443       }
9444       Chain = R;
9445     }
9446   }
9447 
9448   // If tail is folded by masking, introduce selects between the phi
9449   // and the live-out instruction of each reduction, at the end of the latch.
9450   if (CM.foldTailByMasking()) {
9451     for (VPRecipeBase &R : Plan->getEntry()->getEntryBasicBlock()->phis()) {
9452       VPReductionPHIRecipe *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
9453       if (!PhiR || PhiR->isInLoop())
9454         continue;
9455       Builder.setInsertPoint(LatchVPBB);
9456       VPValue *Cond =
9457           RecipeBuilder.createBlockInMask(OrigLoop->getHeader(), Plan);
9458       VPValue *Red = PhiR->getBackedgeValue();
9459       Builder.createNaryOp(Instruction::Select, {Cond, Red, PhiR});
9460     }
9461   }
9462 }
9463 
9464 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
9465 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
9466                                VPSlotTracker &SlotTracker) const {
9467   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
9468   IG->getInsertPos()->printAsOperand(O, false);
9469   O << ", ";
9470   getAddr()->printAsOperand(O, SlotTracker);
9471   VPValue *Mask = getMask();
9472   if (Mask) {
9473     O << ", ";
9474     Mask->printAsOperand(O, SlotTracker);
9475   }
9476 
9477   unsigned OpIdx = 0;
9478   for (unsigned i = 0; i < IG->getFactor(); ++i) {
9479     if (!IG->getMember(i))
9480       continue;
9481     if (getNumStoreOperands() > 0) {
9482       O << "\n" << Indent << "  store ";
9483       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
9484       O << " to index " << i;
9485     } else {
9486       O << "\n" << Indent << "  ";
9487       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
9488       O << " = load from index " << i;
9489     }
9490     ++OpIdx;
9491   }
9492 }
9493 #endif
9494 
9495 void VPWidenCallRecipe::execute(VPTransformState &State) {
9496   State.ILV->widenCallInstruction(*cast<CallInst>(getUnderlyingInstr()), this,
9497                                   *this, State);
9498 }
9499 
9500 void VPWidenSelectRecipe::execute(VPTransformState &State) {
9501   auto &I = *cast<SelectInst>(getUnderlyingInstr());
9502   State.ILV->setDebugLocFromInst(&I);
9503 
9504   // The condition can be loop invariant but still defined inside the
9505   // loop. This means that we can't just use the original 'cond' value.
9506   // We have to take the 'vectorized' value and pick the first lane.
9507   // Instcombine will make this a no-op.
9508   auto *InvarCond =
9509       InvariantCond ? State.get(getOperand(0), VPIteration(0, 0)) : nullptr;
9510 
9511   for (unsigned Part = 0; Part < State.UF; ++Part) {
9512     Value *Cond = InvarCond ? InvarCond : State.get(getOperand(0), Part);
9513     Value *Op0 = State.get(getOperand(1), Part);
9514     Value *Op1 = State.get(getOperand(2), Part);
9515     Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
9516     State.set(this, Sel, Part);
9517     State.ILV->addMetadata(Sel, &I);
9518   }
9519 }
9520 
9521 void VPWidenRecipe::execute(VPTransformState &State) {
9522   auto &I = *cast<Instruction>(getUnderlyingValue());
9523   auto &Builder = State.Builder;
9524   switch (I.getOpcode()) {
9525   case Instruction::Call:
9526   case Instruction::Br:
9527   case Instruction::PHI:
9528   case Instruction::GetElementPtr:
9529   case Instruction::Select:
9530     llvm_unreachable("This instruction is handled by a different recipe.");
9531   case Instruction::UDiv:
9532   case Instruction::SDiv:
9533   case Instruction::SRem:
9534   case Instruction::URem:
9535   case Instruction::Add:
9536   case Instruction::FAdd:
9537   case Instruction::Sub:
9538   case Instruction::FSub:
9539   case Instruction::FNeg:
9540   case Instruction::Mul:
9541   case Instruction::FMul:
9542   case Instruction::FDiv:
9543   case Instruction::FRem:
9544   case Instruction::Shl:
9545   case Instruction::LShr:
9546   case Instruction::AShr:
9547   case Instruction::And:
9548   case Instruction::Or:
9549   case Instruction::Xor: {
9550     // Just widen unops and binops.
9551     State.ILV->setDebugLocFromInst(&I);
9552 
9553     for (unsigned Part = 0; Part < State.UF; ++Part) {
9554       SmallVector<Value *, 2> Ops;
9555       for (VPValue *VPOp : operands())
9556         Ops.push_back(State.get(VPOp, Part));
9557 
9558       Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops);
9559 
9560       if (auto *VecOp = dyn_cast<Instruction>(V)) {
9561         VecOp->copyIRFlags(&I);
9562 
9563         // If the instruction is vectorized and was in a basic block that needed
9564         // predication, we can't propagate poison-generating flags (nuw/nsw,
9565         // exact, etc.). The control flow has been linearized and the
9566         // instruction is no longer guarded by the predicate, which could make
9567         // the flag properties no longer hold.
9568         if (State.MayGeneratePoisonRecipes.count(this) > 0)
9569           VecOp->dropPoisonGeneratingFlags();
9570       }
9571 
9572       // Use this vector value for all users of the original instruction.
9573       State.set(this, V, Part);
9574       State.ILV->addMetadata(V, &I);
9575     }
9576 
9577     break;
9578   }
9579   case Instruction::ICmp:
9580   case Instruction::FCmp: {
9581     // Widen compares. Generate vector compares.
9582     bool FCmp = (I.getOpcode() == Instruction::FCmp);
9583     auto *Cmp = cast<CmpInst>(&I);
9584     State.ILV->setDebugLocFromInst(Cmp);
9585     for (unsigned Part = 0; Part < State.UF; ++Part) {
9586       Value *A = State.get(getOperand(0), Part);
9587       Value *B = State.get(getOperand(1), Part);
9588       Value *C = nullptr;
9589       if (FCmp) {
9590         // Propagate fast math flags.
9591         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
9592         Builder.setFastMathFlags(Cmp->getFastMathFlags());
9593         C = Builder.CreateFCmp(Cmp->getPredicate(), A, B);
9594       } else {
9595         C = Builder.CreateICmp(Cmp->getPredicate(), A, B);
9596       }
9597       State.set(this, C, Part);
9598       State.ILV->addMetadata(C, &I);
9599     }
9600 
9601     break;
9602   }
9603 
9604   case Instruction::ZExt:
9605   case Instruction::SExt:
9606   case Instruction::FPToUI:
9607   case Instruction::FPToSI:
9608   case Instruction::FPExt:
9609   case Instruction::PtrToInt:
9610   case Instruction::IntToPtr:
9611   case Instruction::SIToFP:
9612   case Instruction::UIToFP:
9613   case Instruction::Trunc:
9614   case Instruction::FPTrunc:
9615   case Instruction::BitCast: {
9616     auto *CI = cast<CastInst>(&I);
9617     State.ILV->setDebugLocFromInst(CI);
9618 
9619     // Vectorize casts.
9620     Type *DestTy = (State.VF.isScalar())
9621                        ? CI->getType()
9622                        : VectorType::get(CI->getType(), State.VF);
9623 
9624     for (unsigned Part = 0; Part < State.UF; ++Part) {
9625       Value *A = State.get(getOperand(0), Part);
9626       Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy);
9627       State.set(this, Cast, Part);
9628       State.ILV->addMetadata(Cast, &I);
9629     }
9630     break;
9631   }
9632   default:
9633     // This instruction is not vectorized by simple widening.
9634     LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I);
9635     llvm_unreachable("Unhandled instruction!");
9636   } // end of switch.
9637 }
9638 
9639 void VPWidenGEPRecipe::execute(VPTransformState &State) {
9640   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
9641   // Construct a vector GEP by widening the operands of the scalar GEP as
9642   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
9643   // results in a vector of pointers when at least one operand of the GEP
9644   // is vector-typed. Thus, to keep the representation compact, we only use
9645   // vector-typed operands for loop-varying values.
9646 
9647   if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) {
9648     // If we are vectorizing, but the GEP has only loop-invariant operands,
9649     // the GEP we build (by only using vector-typed operands for
9650     // loop-varying values) would be a scalar pointer. Thus, to ensure we
9651     // produce a vector of pointers, we need to either arbitrarily pick an
9652     // operand to broadcast, or broadcast a clone of the original GEP.
9653     // Here, we broadcast a clone of the original.
9654     //
9655     // TODO: If at some point we decide to scalarize instructions having
9656     //       loop-invariant operands, this special case will no longer be
9657     //       required. We would add the scalarization decision to
9658     //       collectLoopScalars() and teach getVectorValue() to broadcast
9659     //       the lane-zero scalar value.
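         // Illustrative sketch (not generated verbatim; value names are made up):
         // with VF = 4 and a GEP whose operands are all loop-invariant, e.g.
         //   %g = getelementptr inbounds i32, i32* %base, i64 %inv
         // the cloned GEP is splatted into a vector of pointers for each part,
         // roughly:
         //   %ins   = insertelement <4 x i32*> poison, i32* %g.clone, i32 0
         //   %splat = shufflevector <4 x i32*> %ins, <4 x i32*> poison,
         //                          <4 x i32> zeroinitializer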
9660     auto *Clone = State.Builder.Insert(GEP->clone());
9661     for (unsigned Part = 0; Part < State.UF; ++Part) {
9662       Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone);
9663       State.set(this, EntryPart, Part);
9664       State.ILV->addMetadata(EntryPart, GEP);
9665     }
9666   } else {
9667     // If the GEP has at least one loop-varying operand, we are sure to
9668     // produce a vector of pointers. But if we are only unrolling, we want
9669     // to produce a scalar GEP for each unroll part. Thus, the GEP we
9670     // produce with the code below will be scalar (if VF == 1) or vector
9671     // (otherwise). Note that for the unroll-only case, we still maintain
9672     // values in the vector mapping with initVector, as we do for other
9673     // instructions.
9674     for (unsigned Part = 0; Part < State.UF; ++Part) {
9675       // The pointer operand of the new GEP. If it's loop-invariant, we
9676       // won't broadcast it.
9677       auto *Ptr = IsPtrLoopInvariant
9678                       ? State.get(getOperand(0), VPIteration(0, 0))
9679                       : State.get(getOperand(0), Part);
9680 
9681       // Collect all the indices for the new GEP. If any index is
9682       // loop-invariant, we won't broadcast it.
9683       SmallVector<Value *, 4> Indices;
9684       for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
9685         VPValue *Operand = getOperand(I);
9686         if (IsIndexLoopInvariant[I - 1])
9687           Indices.push_back(State.get(Operand, VPIteration(0, 0)));
9688         else
9689           Indices.push_back(State.get(Operand, Part));
9690       }
9691 
9692       // If the GEP instruction is vectorized and was in a basic block that
9693       // needed predication, we can't propagate the poison-generating 'inbounds'
9694       // flag. The control flow has been linearized and the GEP is no longer
9695       // guarded by the predicate, which could make the 'inbounds' properties to
9696       // guarded by the predicate, which could cause the 'inbounds' property to
9697       // no longer hold.
9698           GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0;
9699 
9700       // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
9701       // but it should be a vector, otherwise.
9702       auto *NewGEP = IsInBounds
9703                          ? State.Builder.CreateInBoundsGEP(
9704                                GEP->getSourceElementType(), Ptr, Indices)
9705                          : State.Builder.CreateGEP(GEP->getSourceElementType(),
9706                                                    Ptr, Indices);
9707       assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
9708              "NewGEP is not a pointer vector");
9709       State.set(this, NewGEP, Part);
9710       State.ILV->addMetadata(NewGEP, GEP);
9711     }
9712   }
9713 }
9714 
9715 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
9716   assert(!State.Instance && "Int or FP induction being replicated.");
9717   State.ILV->widenIntOrFpInduction(IV, getStartValue()->getLiveInIRValue(),
9718                                    getTruncInst(), getVPValue(0),
9719                                    getCastValue(), State);
9720 }
9721 
9722 void VPWidenPHIRecipe::execute(VPTransformState &State) {
9723   State.ILV->widenPHIInstruction(cast<PHINode>(getUnderlyingValue()), this,
9724                                  State);
9725 }
9726 
9727 void VPBlendRecipe::execute(VPTransformState &State) {
9728   State.ILV->setDebugLocFromInst(Phi, &State.Builder);
9729   // We know that all PHIs in non-header blocks are converted into
9730   // selects, so we don't have to worry about the insertion order and we
9731   // can just use the builder.
9732   // At this point we generate the predication tree. There may be
9733   // duplications since this is a simple recursive scan, but future
9734   // optimizations will clean it up.
9735 
9736   unsigned NumIncoming = getNumIncomingValues();
9737 
9738   // Generate a sequence of selects of the form:
9739   // SELECT(Mask3, In3,
9740   //        SELECT(Mask2, In2,
9741   //               SELECT(Mask1, In1,
9742   //                      In0)))
9743   // Note that Mask0 is never used: lanes for which no path reaches this phi are
9744   // essentially undef and are taken from In0.
9745   InnerLoopVectorizer::VectorParts Entry(State.UF);
9746   for (unsigned In = 0; In < NumIncoming; ++In) {
9747     for (unsigned Part = 0; Part < State.UF; ++Part) {
9748       // We might have single edge PHIs (blocks) - use an identity
9749       // 'select' for the first PHI operand.
9750       Value *In0 = State.get(getIncomingValue(In), Part);
9751       if (In == 0)
9752         Entry[Part] = In0; // Initialize with the first incoming value.
9753       else {
9754         // Select between the current value and the previous incoming edge
9755         // based on the incoming mask.
9756         Value *Cond = State.get(getMask(In), Part);
9757         Entry[Part] =
9758             State.Builder.CreateSelect(Cond, In0, Entry[Part], "predphi");
9759       }
9760     }
9761   }
9762   for (unsigned Part = 0; Part < State.UF; ++Part)
9763     State.set(this, Entry[Part], Part);
9764 }
9765 
9766 void VPInterleaveRecipe::execute(VPTransformState &State) {
9767   assert(!State.Instance && "Interleave group being replicated.");
9768   State.ILV->vectorizeInterleaveGroup(IG, definedValues(), State, getAddr(),
9769                                       getStoredValues(), getMask());
9770 }
9771 
9772 void VPReductionRecipe::execute(VPTransformState &State) {
9773   assert(!State.Instance && "Reduction being replicated.");
9774   Value *PrevInChain = State.get(getChainOp(), 0);
9775   RecurKind Kind = RdxDesc->getRecurrenceKind();
9776   bool IsOrdered = State.ILV->useOrderedReductions(*RdxDesc);
9777   // Propagate the fast-math flags carried by the underlying instruction.
9778   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
9779   State.Builder.setFastMathFlags(RdxDesc->getFastMathFlags());
9780   for (unsigned Part = 0; Part < State.UF; ++Part) {
9781     Value *NewVecOp = State.get(getVecOp(), Part);
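         // For a conditional reduction, masked-off lanes are replaced below with
         // the reduction identity so they do not change the result. Illustrative
         // IR (not generated verbatim; an fadd reduction typically uses -0.0 as
         // its identity):
         //   %sel = select <4 x i1> %cond, <4 x float> %val,
         //          <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>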
9782     if (VPValue *Cond = getCondOp()) {
9783       Value *NewCond = State.get(Cond, Part);
9784       VectorType *VecTy = cast<VectorType>(NewVecOp->getType());
9785       Value *Iden = RdxDesc->getRecurrenceIdentity(
9786           Kind, VecTy->getElementType(), RdxDesc->getFastMathFlags());
9787       Value *IdenVec =
9788           State.Builder.CreateVectorSplat(VecTy->getElementCount(), Iden);
9789       Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, IdenVec);
9790       NewVecOp = Select;
9791     }
9792     Value *NewRed;
9793     Value *NextInChain;
9794     if (IsOrdered) {
9795       if (State.VF.isVector())
9796         NewRed = createOrderedReduction(State.Builder, *RdxDesc, NewVecOp,
9797                                         PrevInChain);
9798       else
9799         NewRed = State.Builder.CreateBinOp(
9800             (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), PrevInChain,
9801             NewVecOp);
9802       PrevInChain = NewRed;
9803     } else {
9804       PrevInChain = State.get(getChainOp(), Part);
9805       NewRed = createTargetReduction(State.Builder, TTI, *RdxDesc, NewVecOp);
9806     }
9807     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind)) {
9808       NextInChain =
9809           createMinMaxOp(State.Builder, RdxDesc->getRecurrenceKind(),
9810                          NewRed, PrevInChain);
9811     } else if (IsOrdered)
9812       NextInChain = NewRed;
9813     else
9814       NextInChain = State.Builder.CreateBinOp(
9815           (Instruction::BinaryOps)RdxDesc->getOpcode(Kind), NewRed,
9816           PrevInChain);
9817     State.set(this, NextInChain, Part);
9818   }
9819 }
9820 
9821 void VPReplicateRecipe::execute(VPTransformState &State) {
9822   if (State.Instance) { // Generate a single instance.
9823     assert(!State.VF.isScalable() && "Can't scalarize a scalable vector");
9824     State.ILV->scalarizeInstruction(getUnderlyingInstr(), this, *State.Instance,
9825                                     IsPredicated, State);
9826     // Insert scalar instance packing it into a vector.
9827     if (AlsoPack && State.VF.isVector()) {
9828       // If we're constructing lane 0, initialize to start from poison.
9829       if (State.Instance->Lane.isFirstLane()) {
9830         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
9831         Value *Poison = PoisonValue::get(
9832             VectorType::get(getUnderlyingValue()->getType(), State.VF));
9833         State.set(this, Poison, State.Instance->Part);
9834       }
9835       State.ILV->packScalarIntoVectorValue(this, *State.Instance, State);
9836     }
9837     return;
9838   }
9839 
9840   // Generate scalar instances for all VF lanes of all UF parts, unless the
9841   // instruction is uniform, in which case generate only the first lane for
9842   // each of the UF parts.
9843   unsigned EndLane = IsUniform ? 1 : State.VF.getKnownMinValue();
9844   assert((!State.VF.isScalable() || IsUniform) &&
9845          "Can't scalarize a scalable vector");
9846   for (unsigned Part = 0; Part < State.UF; ++Part)
9847     for (unsigned Lane = 0; Lane < EndLane; ++Lane)
9848       State.ILV->scalarizeInstruction(getUnderlyingInstr(), this,
9849                                       VPIteration(Part, Lane), IsPredicated,
9850                                       State);
9851 }
9852 
9853 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
9854   assert(State.Instance && "Branch on Mask works only on single instance.");
9855 
9856   unsigned Part = State.Instance->Part;
9857   unsigned Lane = State.Instance->Lane.getKnownLane();
9858 
9859   Value *ConditionBit = nullptr;
9860   VPValue *BlockInMask = getMask();
9861   if (BlockInMask) {
9862     ConditionBit = State.get(BlockInMask, Part);
9863     if (ConditionBit->getType()->isVectorTy())
9864       ConditionBit = State.Builder.CreateExtractElement(
9865           ConditionBit, State.Builder.getInt32(Lane));
9866   } else // Block in mask is all-one.
9867     ConditionBit = State.Builder.getTrue();
9868 
9869   // Replace the temporary unreachable terminator with a new conditional branch,
9870   // whose two destinations will be set later when they are created.
9871   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
9872   assert(isa<UnreachableInst>(CurrentTerminator) &&
9873          "Expected to replace unreachable terminator with conditional branch.");
9874   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
9875   CondBr->setSuccessor(0, nullptr);
9876   ReplaceInstWithInst(CurrentTerminator, CondBr);
9877 }
9878 
9879 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
9880   assert(State.Instance && "Predicated instruction PHI works per instance.");
9881   Instruction *ScalarPredInst =
9882       cast<Instruction>(State.get(getOperand(0), *State.Instance));
9883   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
9884   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
9885   assert(PredicatingBB && "Predicated block has no single predecessor.");
9886   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
9887          "operand must be VPReplicateRecipe");
9888 
9889   // By current pack/unpack logic we need to generate only a single phi node: if
9890   // a vector value for the predicated instruction exists at this point it means
9891   // the instruction has vector users only, and a phi for the vector value is
9892   // needed. In this case the recipe of the predicated instruction is marked to
9893   // also do that packing, thereby "hoisting" the insert-element sequence.
9894   // Otherwise, a phi node for the scalar value is needed.
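       // Illustrative IR for the vector-value case (made-up names):
       //   %vphi = phi <4 x i32> [ %vec.unmodified, %predicating.bb ],
       //                         [ %vec.with.insert, %predicated.bb ]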
9895   unsigned Part = State.Instance->Part;
9896   if (State.hasVectorValue(getOperand(0), Part)) {
9897     Value *VectorValue = State.get(getOperand(0), Part);
9898     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
9899     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
9900     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
9901     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
9902     if (State.hasVectorValue(this, Part))
9903       State.reset(this, VPhi, Part);
9904     else
9905       State.set(this, VPhi, Part);
9906     // NOTE: Currently we need to update the value of the operand, so the next
9907     // predicated iteration inserts its generated value in the correct vector.
9908     State.reset(getOperand(0), VPhi, Part);
9909   } else {
9910     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
9911     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
9912     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
9913                      PredicatingBB);
9914     Phi->addIncoming(ScalarPredInst, PredicatedBB);
9915     if (State.hasScalarValue(this, *State.Instance))
9916       State.reset(this, Phi, *State.Instance);
9917     else
9918       State.set(this, Phi, *State.Instance);
9919     // NOTE: Currently we need to update the value of the operand, so the next
9920     // predicated iteration inserts its generated value in the correct vector.
9921     State.reset(getOperand(0), Phi, *State.Instance);
9922   }
9923 }
9924 
9925 void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) {
9926   VPValue *StoredValue = isStore() ? getStoredValue() : nullptr;
9927 
9928   // Attempt to issue a wide load or store.
9929   LoadInst *LI = dyn_cast<LoadInst>(&Ingredient);
9930   StoreInst *SI = dyn_cast<StoreInst>(&Ingredient);
9931 
9932   assert((LI || SI) && "Invalid Load/Store instruction");
9933   assert((!SI || StoredValue) && "No stored value provided for widened store");
9934   assert((!LI || !StoredValue) && "Stored value provided for widened load");
9935 
9936   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
9937 
9938   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
9939   const Align Alignment = getLoadStoreAlignment(&Ingredient);
9940   bool CreateGatherScatter = !Consecutive;
9941 
9942   auto &Builder = State.Builder;
9943   InnerLoopVectorizer::VectorParts BlockInMaskParts(State.UF);
9944   bool isMaskRequired = getMask();
9945   if (isMaskRequired)
9946     for (unsigned Part = 0; Part < State.UF; ++Part)
9947       BlockInMaskParts[Part] = State.get(getMask(), Part);
9948 
9949   const auto CreateVecPtr = [&](unsigned Part, Value *Ptr) -> Value * {
9950     // Calculate the pointer for the specific unroll-part.
9951     GetElementPtrInst *PartPtr = nullptr;
9952 
9953     bool InBounds = false;
9954     if (auto *gep = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts()))
9955       InBounds = gep->isInBounds();
9956     if (Reverse) {
9957       // If the address is consecutive but reversed, then the
9958       // wide store needs to start at the last vector element.
9959       // RunTimeVF = VScale * VF.getKnownMinValue()
9960       // For fixed width, VScale is 1, so RunTimeVF = VF.getKnownMinValue()
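           // Worked example (assuming a fixed-width VF of 4, so RunTimeVF = 4):
           // for Part = 1, NumElt = -4 and LastLane = -3, i.e. the part pointer
           // first steps back one full vector and then to the lowest-addressed
           // element of that reversed group.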
9961       Value *RunTimeVF = getRuntimeVF(Builder, Builder.getInt32Ty(), State.VF);
9962       // NumElt = -Part * RunTimeVF
9963       Value *NumElt = Builder.CreateMul(Builder.getInt32(-Part), RunTimeVF);
9964       // LastLane = 1 - RunTimeVF
9965       Value *LastLane = Builder.CreateSub(Builder.getInt32(1), RunTimeVF);
9966       PartPtr =
9967           cast<GetElementPtrInst>(Builder.CreateGEP(ScalarDataTy, Ptr, NumElt));
9968       PartPtr->setIsInBounds(InBounds);
9969       PartPtr = cast<GetElementPtrInst>(
9970           Builder.CreateGEP(ScalarDataTy, PartPtr, LastLane));
9971       PartPtr->setIsInBounds(InBounds);
9972       if (isMaskRequired) // Reverse of a null all-one mask is a null mask.
9973         BlockInMaskParts[Part] =
9974             Builder.CreateVectorReverse(BlockInMaskParts[Part], "reverse");
9975     } else {
9976       Value *Increment =
9977           createStepForVF(Builder, Builder.getInt32Ty(), State.VF, Part);
9978       PartPtr = cast<GetElementPtrInst>(
9979           Builder.CreateGEP(ScalarDataTy, Ptr, Increment));
9980       PartPtr->setIsInBounds(InBounds);
9981     }
9982 
9983     unsigned AddressSpace = Ptr->getType()->getPointerAddressSpace();
9984     return Builder.CreateBitCast(PartPtr, DataTy->getPointerTo(AddressSpace));
9985   };
9986 
9987   // Handle Stores:
9988   if (SI) {
9989     State.ILV->setDebugLocFromInst(SI);
9990 
9991     for (unsigned Part = 0; Part < State.UF; ++Part) {
9992       Instruction *NewSI = nullptr;
9993       Value *StoredVal = State.get(StoredValue, Part);
9994       if (CreateGatherScatter) {
9995         Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
9996         Value *VectorGep = State.get(getAddr(), Part);
9997         NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment,
9998                                             MaskPart);
9999       } else {
10000         if (Reverse) {
10001           // If we store to reverse consecutive memory locations, then we need
10002           // to reverse the order of elements in the stored value.
10003           StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
10004           // We don't want to update the value in the map as it might be used in
10005           // another expression. So don't call resetVectorValue(StoredVal).
10006         }
10007         auto *VecPtr =
10008             CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10009         if (isMaskRequired)
10010           NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
10011                                             BlockInMaskParts[Part]);
10012         else
10013           NewSI = Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment);
10014       }
10015       State.ILV->addMetadata(NewSI, SI);
10016     }
10017     return;
10018   }
10019 
10020   // Handle loads.
10021   assert(LI && "Must have a load instruction");
10022   State.ILV->setDebugLocFromInst(LI);
10023   for (unsigned Part = 0; Part < State.UF; ++Part) {
10024     Value *NewLI;
10025     if (CreateGatherScatter) {
10026       Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr;
10027       Value *VectorGep = State.get(getAddr(), Part);
10028       NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart,
10029                                          nullptr, "wide.masked.gather");
10030       State.ILV->addMetadata(NewLI, LI);
10031     } else {
10032       auto *VecPtr =
10033           CreateVecPtr(Part, State.get(getAddr(), VPIteration(0, 0)));
10034       if (isMaskRequired)
10035         NewLI = Builder.CreateMaskedLoad(
10036             DataTy, VecPtr, Alignment, BlockInMaskParts[Part],
10037             PoisonValue::get(DataTy), "wide.masked.load");
10038       else
10039         NewLI =
10040             Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
10041 
10042       // Add metadata to the load, but setVectorValue to the reverse shuffle.
10043       State.ILV->addMetadata(NewLI, LI);
10044       if (Reverse)
10045         NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
10046     }
10047 
10048     State.set(getVPSingleValue(), NewLI, Part);
10049   }
10050 }
10051 
10052 // Determine how to lower the scalar epilogue, which depends on 1) optimising
10053 // for minimum code-size, 2) predicate compiler options, 3) loop hints forcing
10054 // predication, and 4) a TTI hook that analyses whether the loop is suitable
10055 // for predication.
10056 static ScalarEpilogueLowering getScalarEpilogueLowering(
10057     Function *F, Loop *L, LoopVectorizeHints &Hints, ProfileSummaryInfo *PSI,
10058     BlockFrequencyInfo *BFI, TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
10059     AssumptionCache *AC, LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
10060     LoopVectorizationLegality &LVL) {
10061   // 1) OptSize takes precedence over all other options, i.e. if this is set,
10062   // don't look at hints or options, and don't request a scalar epilogue.
10063   // (For PGSO, as shouldOptimizeForSize isn't currently accessible from
10064   // LoopAccessInfo (due to code dependency and not being able to reliably get
10065   // PSI/BFI from a loop analysis under NPM), we cannot suppress the collection
10066   // of strides in LoopAccessInfo::analyzeLoop() and vectorize without
10067   // versioning when the vectorization is forced, unlike hasOptSize. So revert
10068   // back to the old way and vectorize with versioning when forced. See D81345.)
10069   if (F->hasOptSize() || (llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
10070                                                       PGSOQueryType::IRPass) &&
10071                           Hints.getForce() != LoopVectorizeHints::FK_Enabled))
10072     return CM_ScalarEpilogueNotAllowedOptSize;
10073 
10074   // 2) If set, obey the directives
10075   if (PreferPredicateOverEpilogue.getNumOccurrences()) {
10076     switch (PreferPredicateOverEpilogue) {
10077     case PreferPredicateTy::ScalarEpilogue:
10078       return CM_ScalarEpilogueAllowed;
10079     case PreferPredicateTy::PredicateElseScalarEpilogue:
10080       return CM_ScalarEpilogueNotNeededUsePredicate;
10081     case PreferPredicateTy::PredicateOrDontVectorize:
10082       return CM_ScalarEpilogueNotAllowedUsePredicate;
10083     };
10084   }
10085 
10086   // 3) If set, obey the hints
10087   switch (Hints.getPredicate()) {
10088   case LoopVectorizeHints::FK_Enabled:
10089     return CM_ScalarEpilogueNotNeededUsePredicate;
10090   case LoopVectorizeHints::FK_Disabled:
10091     return CM_ScalarEpilogueAllowed;
10092   };
10093 
10094   // 4) If the TTI hook indicates this is profitable, request predication.
10095   if (TTI->preferPredicateOverEpilogue(L, LI, *SE, *AC, TLI, DT,
10096                                        LVL.getLAI()))
10097     return CM_ScalarEpilogueNotNeededUsePredicate;
10098 
10099   return CM_ScalarEpilogueAllowed;
10100 }
10101 
10102 Value *VPTransformState::get(VPValue *Def, unsigned Part) {
10103   // If values have been set for this Def, return the one relevant for \p Part.
10104   if (hasVectorValue(Def, Part))
10105     return Data.PerPartOutput[Def][Part];
10106 
10107   if (!hasScalarValue(Def, {Part, 0})) {
10108     Value *IRV = Def->getLiveInIRValue();
10109     Value *B = ILV->getBroadcastInstrs(IRV);
10110     set(Def, B, Part);
10111     return B;
10112   }
10113 
10114   Value *ScalarValue = get(Def, {Part, 0});
10115   // If we aren't vectorizing, we can just copy the scalar map values over
10116   // to the vector map.
10117   if (VF.isScalar()) {
10118     set(Def, ScalarValue, Part);
10119     return ScalarValue;
10120   }
10121 
10122   auto *RepR = dyn_cast<VPReplicateRecipe>(Def);
10123   bool IsUniform = RepR && RepR->isUniform();
10124 
10125   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
10126   // Check if there is a scalar value for the selected lane.
10127   if (!hasScalarValue(Def, {Part, LastLane})) {
10128     // At the moment, VPWidenIntOrFpInductionRecipes can also be uniform.
10129     assert(isa<VPWidenIntOrFpInductionRecipe>(Def->getDef()) &&
10130            "unexpected recipe found to be invariant");
10131     IsUniform = true;
10132     LastLane = 0;
10133   }
10134 
10135   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
10136   // Set the insert point after the last scalarized instruction or after the
10137   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
10138   // will directly follow the scalar definitions.
10139   auto OldIP = Builder.saveIP();
10140   auto NewIP =
10141       isa<PHINode>(LastInst)
10142           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
10143           : std::next(BasicBlock::iterator(LastInst));
10144   Builder.SetInsertPoint(&*NewIP);
10145 
10146   // However, if we are vectorizing, we need to construct the vector values.
10147   // If the value is known to be uniform after vectorization, we can just
10148   // broadcast the scalar value corresponding to lane zero for each unroll
10149   // iteration. Otherwise, we construct the vector values using
10150   // insertelement instructions. Since the resulting vectors are stored in
10151   // State, we will only generate the insertelements once.
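        // As a rough sketch (assuming VF = 4 and an i32 definition; value names
        // are made up), the non-uniform case emits something like:
        //   %pack0 = insertelement <4 x i32> poison, i32 %s0, i32 0
        //   %pack1 = insertelement <4 x i32> %pack0, i32 %s1, i32 1
        //   ...
        // and the final insertelement is recorded as the vector value for this
        // part.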
10152   Value *VectorValue = nullptr;
10153   if (IsUniform) {
10154     VectorValue = ILV->getBroadcastInstrs(ScalarValue);
10155     set(Def, VectorValue, Part);
10156   } else {
10157     // Initialize packing with insertelements to start from poison.
10158     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
10159     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
10160     set(Def, Undef, Part);
10161     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
10162       ILV->packScalarIntoVectorValue(Def, {Part, Lane}, *this);
10163     VectorValue = get(Def, Part);
10164   }
10165   Builder.restoreIP(OldIP);
10166   return VectorValue;
10167 }
10168 
10169 // Process the loop in the VPlan-native vectorization path. This path builds
10170 // VPlan upfront in the vectorization pipeline, which allows to apply
10171 // VPlan-to-VPlan transformations from the very beginning without modifying the
10172 // input LLVM IR.
10173 static bool processLoopInVPlanNativePath(
10174     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
10175     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
10176     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
10177     OptimizationRemarkEmitter *ORE, BlockFrequencyInfo *BFI,
10178     ProfileSummaryInfo *PSI, LoopVectorizeHints &Hints,
10179     LoopVectorizationRequirements &Requirements) {
10180 
10181   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
10182     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
10183     return false;
10184   }
10185   assert(EnableVPlanNativePath && "VPlan-native path is disabled.");
10186   Function *F = L->getHeader()->getParent();
10187   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL->getLAI());
10188 
10189   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10190       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, *LVL);
10191 
10192   LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
10193                                 &Hints, IAI);
10194   // Use the planner for outer loop vectorization.
10195   // TODO: CM is not used at this point inside the planner. Turn CM into an
10196   // optional argument if we don't need it in the future.
10197   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, LVL, CM, IAI, PSE, Hints,
10198                                Requirements, ORE);
10199 
10200   // Get user vectorization factor.
10201   ElementCount UserVF = Hints.getWidth();
10202 
10203   CM.collectElementTypesForWidening();
10204 
10205   // Plan how to best vectorize, return the best VF and its cost.
10206   const VectorizationFactor VF = LVP.planInVPlanNativePath(UserVF);
10207 
10208   // If we are stress testing VPlan builds, do not attempt to generate vector
10209   // code. Masked vector code generation support will follow soon.
10210   // Also, do not attempt to vectorize if no vector code will be produced.
10211   if (VPlanBuildStressTest || EnableVPlanPredication ||
10212       VectorizationFactor::Disabled() == VF)
10213     return false;
10214 
10215   VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10216 
10217   {
10218     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10219                              F->getParent()->getDataLayout());
10220     InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, 1, LVL,
10221                            &CM, BFI, PSI, Checks);
10222     LLVM_DEBUG(dbgs() << "Vectorizing outer loop in \""
10223                       << L->getHeader()->getParent()->getName() << "\"\n");
10224     LVP.executePlan(VF.Width, 1, BestPlan, LB, DT);
10225   }
10226 
10227   // Mark the loop as already vectorized to avoid vectorizing again.
10228   Hints.setAlreadyVectorized();
10229   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10230   return true;
10231 }
10232 
10233 // Emit a remark if there are stores to floats that required a floating point
10234 // extension. If the vectorized loop was generated with mixed floating-point
10235 // precision, there will be a performance penalty from the conversion overhead
10236 // and the change in the vector width.
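      // An illustrative (made-up) source pattern that triggers this remark, where
      // a float store depends on a double-precision computation via an fpext:
      //   float F[N]; double D;
      //   F[I] = D * F[I]; // F[I] is extended to double, multiplied, truncated.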
10237 static void checkMixedPrecision(Loop *L, OptimizationRemarkEmitter *ORE) {
10238   SmallVector<Instruction *, 4> Worklist;
10239   for (BasicBlock *BB : L->getBlocks()) {
10240     for (Instruction &Inst : *BB) {
10241       if (auto *S = dyn_cast<StoreInst>(&Inst)) {
10242         if (S->getValueOperand()->getType()->isFloatTy())
10243           Worklist.push_back(S);
10244       }
10245     }
10246   }
10247 
10248   // Traverse the floating point stores upwards, searching for floating point
10249   // conversions.
10250   SmallPtrSet<const Instruction *, 4> Visited;
10251   SmallPtrSet<const Instruction *, 4> EmittedRemark;
10252   while (!Worklist.empty()) {
10253     auto *I = Worklist.pop_back_val();
10254     if (!L->contains(I))
10255       continue;
10256     if (!Visited.insert(I).second)
10257       continue;
10258 
10259     // Emit a remark if the floating point store required a floating
10260     // point conversion.
10261     // TODO: More work could be done to identify the root cause such as a
10262     // constant or a function return type and point the user to it.
10263     if (isa<FPExtInst>(I) && EmittedRemark.insert(I).second)
10264       ORE->emit([&]() {
10265         return OptimizationRemarkAnalysis(LV_NAME, "VectorMixedPrecision",
10266                                           I->getDebugLoc(), L->getHeader())
10267                << "floating point conversion changes vector width. "
10268                << "Mixed floating point precision requires an up/down "
10269                << "cast that will negatively impact performance.";
10270       });
10271 
10272     for (Use &Op : I->operands())
10273       if (auto *OpI = dyn_cast<Instruction>(Op))
10274         Worklist.push_back(OpI);
10275   }
10276 }
10277 
10278 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
10279     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
10280                                !EnableLoopInterleaving),
10281       VectorizeOnlyWhenForced(Opts.VectorizeOnlyWhenForced ||
10282                               !EnableLoopVectorization) {}
10283 
10284 bool LoopVectorizePass::processLoop(Loop *L) {
10285   assert((EnableVPlanNativePath || L->isInnermost()) &&
10286          "VPlan-native path is not enabled. Only process inner loops.");
10287 
10288 #ifndef NDEBUG
10289   const std::string DebugLocStr = getDebugLocString(L);
10290 #endif /* NDEBUG */
10291 
10292   LLVM_DEBUG(dbgs() << "\nLV: Checking a loop in \""
10293                     << L->getHeader()->getParent()->getName() << "\" from "
10294                     << DebugLocStr << "\n");
10295 
10296   LoopVectorizeHints Hints(L, InterleaveOnlyWhenForced, *ORE);
10297 
10298   LLVM_DEBUG(
10299       dbgs() << "LV: Loop hints:"
10300              << " force="
10301              << (Hints.getForce() == LoopVectorizeHints::FK_Disabled
10302                      ? "disabled"
10303                      : (Hints.getForce() == LoopVectorizeHints::FK_Enabled
10304                             ? "enabled"
10305                             : "?"))
10306              << " width=" << Hints.getWidth()
10307              << " interleave=" << Hints.getInterleave() << "\n");
10308 
10309   // Function containing loop
10310   Function *F = L->getHeader()->getParent();
10311 
10312   // Looking at the diagnostic output is the only way to determine if a loop
10313   // was vectorized (other than looking at the IR or machine code), so it
10314   // is important to generate an optimization remark for each loop. Most of
10315   // these messages are generated as OptimizationRemarkAnalysis. Remarks
10316   // generated as OptimizationRemark and OptimizationRemarkMissed are
10317   // less verbose, reporting vectorized loops and unvectorized loops that may
10318   // benefit from vectorization, respectively.
10319 
10320   if (!Hints.allowVectorization(F, L, VectorizeOnlyWhenForced)) {
10321     LLVM_DEBUG(dbgs() << "LV: Loop hints prevent vectorization.\n");
10322     return false;
10323   }
10324 
10325   PredicatedScalarEvolution PSE(*SE, *L);
10326 
10327   // Check if it is legal to vectorize the loop.
10328   LoopVectorizationRequirements Requirements;
10329   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, AA, F, GetLAA, LI, ORE,
10330                                 &Requirements, &Hints, DB, AC, BFI, PSI);
10331   if (!LVL.canVectorize(EnableVPlanNativePath)) {
10332     LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
10333     Hints.emitRemarkWithHints();
10334     return false;
10335   }
10336 
10337   // Check the function attributes and profiles to find out if this function
10338   // should be optimized for size.
10339   ScalarEpilogueLowering SEL = getScalarEpilogueLowering(
10340       F, L, Hints, PSI, BFI, TTI, TLI, AC, LI, PSE.getSE(), DT, LVL);
10341 
10342   // Entrance to the VPlan-native vectorization path. Outer loops are processed
10343   // here. They may require CFG and instruction level transformations before
10344   // even evaluating whether vectorization is profitable. Since we cannot modify
10345   // the incoming IR, we need to build VPlan upfront in the vectorization
10346   // pipeline.
10347   if (!L->isInnermost())
10348     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
10349                                         ORE, BFI, PSI, Hints, Requirements);
10350 
10351   assert(L->isInnermost() && "Inner loop expected.");
10352 
10353   // Check the loop for a trip count threshold: vectorize loops with a tiny trip
10354   // count by optimizing for size, to minimize overheads.
10355   auto ExpectedTC = getSmallBestKnownTC(*SE, L);
10356   if (ExpectedTC && *ExpectedTC < TinyTripCountVectorThreshold) {
10357     LLVM_DEBUG(dbgs() << "LV: Found a loop with a very small trip count. "
10358                       << "This loop is worth vectorizing only if no scalar "
10359                       << "iteration overheads are incurred.");
10360     if (Hints.getForce() == LoopVectorizeHints::FK_Enabled)
10361       LLVM_DEBUG(dbgs() << " But vectorizing was explicitly forced.\n");
10362     else {
10363       LLVM_DEBUG(dbgs() << "\n");
10364       SEL = CM_ScalarEpilogueNotAllowedLowTripLoop;
10365     }
10366   }
10367 
10368   // Check the function attributes to see if implicit floats are allowed.
10369   // FIXME: This check doesn't seem possibly correct -- what if the loop is
10370   // an integer loop and the vector instructions selected are purely integer
10371   // vector instructions?
10372   if (F->hasFnAttribute(Attribute::NoImplicitFloat)) {
10373     reportVectorizationFailure(
10374         "Can't vectorize when the NoImplicitFloat attribute is used",
10375         "loop not vectorized due to NoImplicitFloat attribute",
10376         "NoImplicitFloat", ORE, L);
10377     Hints.emitRemarkWithHints();
10378     return false;
10379   }
10380 
10381   // Check if the target supports potentially unsafe FP vectorization.
10382   // FIXME: Add a check for the type of safety issue (denormal, signaling)
10383   // for the target we're vectorizing for, to make sure none of the
10384   // additional fp-math flags can help.
10385   if (Hints.isPotentiallyUnsafe() &&
10386       TTI->isFPVectorizationPotentiallyUnsafe()) {
10387     reportVectorizationFailure(
10388         "Potentially unsafe FP op prevents vectorization",
10389         "loop not vectorized due to unsafe FP support.",
10390         "UnsafeFP", ORE, L);
10391     Hints.emitRemarkWithHints();
10392     return false;
10393   }
10394 
10395   bool AllowOrderedReductions;
10396   // If the flag is set, use that instead and override the TTI behaviour.
10397   if (ForceOrderedReductions.getNumOccurrences() > 0)
10398     AllowOrderedReductions = ForceOrderedReductions;
10399   else
10400     AllowOrderedReductions = TTI->enableOrderedReductions();
10401   if (!LVL.canVectorizeFPMath(AllowOrderedReductions)) {
10402     ORE->emit([&]() {
10403       auto *ExactFPMathInst = Requirements.getExactFPInst();
10404       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
10405                                                  ExactFPMathInst->getDebugLoc(),
10406                                                  ExactFPMathInst->getParent())
10407              << "loop not vectorized: cannot prove it is safe to reorder "
10408                 "floating-point operations";
10409     });
10410     LLVM_DEBUG(dbgs() << "LV: loop not vectorized: cannot prove it is safe to "
10411                          "reorder floating-point operations\n");
10412     Hints.emitRemarkWithHints();
10413     return false;
10414   }
10415 
10416   bool UseInterleaved = TTI->enableInterleavedAccessVectorization();
10417   InterleavedAccessInfo IAI(PSE, L, DT, LI, LVL.getLAI());
10418 
10419   // If an override option has been passed in for interleaved accesses, use it.
10420   if (EnableInterleavedMemAccesses.getNumOccurrences() > 0)
10421     UseInterleaved = EnableInterleavedMemAccesses;
10422 
10423   // Analyze interleaved memory accesses.
10424   if (UseInterleaved) {
10425     IAI.analyzeInterleaving(useMaskedInterleavedAccesses(*TTI));
10426   }
10427 
10428   // Use the cost model.
10429   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
10430                                 F, &Hints, IAI);
10431   CM.collectValuesToIgnore();
10432   CM.collectElementTypesForWidening();
10433 
10434   // Use the planner for vectorization.
10435   LoopVectorizationPlanner LVP(L, LI, TLI, TTI, &LVL, CM, IAI, PSE, Hints,
10436                                Requirements, ORE);
10437 
10438   // Get user vectorization factor and interleave count.
10439   ElementCount UserVF = Hints.getWidth();
10440   unsigned UserIC = Hints.getInterleave();
10441 
10442   // Plan how to best vectorize, return the best VF and its cost.
10443   Optional<VectorizationFactor> MaybeVF = LVP.plan(UserVF, UserIC);
10444 
10445   VectorizationFactor VF = VectorizationFactor::Disabled();
10446   unsigned IC = 1;
10447 
10448   if (MaybeVF) {
10449     VF = *MaybeVF;
10450     // Select the interleave count.
10451     IC = CM.selectInterleaveCount(VF.Width, *VF.Cost.getValue());
10452   }
10453 
10454   // Identify the diagnostic messages that should be produced.
10455   std::pair<StringRef, std::string> VecDiagMsg, IntDiagMsg;
10456   bool VectorizeLoop = true, InterleaveLoop = true;
10457   if (VF.Width.isScalar()) {
10458     LLVM_DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n");
10459     VecDiagMsg = std::make_pair(
10460         "VectorizationNotBeneficial",
10461         "the cost-model indicates that vectorization is not beneficial");
10462     VectorizeLoop = false;
10463   }
10464 
10465   if (!MaybeVF && UserIC > 1) {
10466     // Tell the user interleaving was avoided up-front, despite being explicitly
10467     // requested.
10468     LLVM_DEBUG(dbgs() << "LV: Ignoring UserIC, because vectorization and "
10469                          "interleaving should be avoided up front\n");
10470     IntDiagMsg = std::make_pair(
10471         "InterleavingAvoided",
10472         "Ignoring UserIC, because interleaving was avoided up front");
10473     InterleaveLoop = false;
10474   } else if (IC == 1 && UserIC <= 1) {
10475     // Tell the user interleaving is not beneficial.
10476     LLVM_DEBUG(dbgs() << "LV: Interleaving is not beneficial.\n");
10477     IntDiagMsg = std::make_pair(
10478         "InterleavingNotBeneficial",
10479         "the cost-model indicates that interleaving is not beneficial");
10480     InterleaveLoop = false;
10481     if (UserIC == 1) {
10482       IntDiagMsg.first = "InterleavingNotBeneficialAndDisabled";
10483       IntDiagMsg.second +=
10484           " and is explicitly disabled or interleave count is set to 1";
10485     }
10486   } else if (IC > 1 && UserIC == 1) {
10487     // Tell the user interleaving is beneficial, but it is explicitly disabled.
10488     LLVM_DEBUG(
10489         dbgs() << "LV: Interleaving is beneficial but is explicitly disabled.");
10490     IntDiagMsg = std::make_pair(
10491         "InterleavingBeneficialButDisabled",
10492         "the cost-model indicates that interleaving is beneficial "
10493         "but is explicitly disabled or interleave count is set to 1");
10494     InterleaveLoop = false;
10495   }
10496 
10497   // Override IC if user provided an interleave count.
10498   IC = UserIC > 0 ? UserIC : IC;
10499 
10500   // Emit diagnostic messages, if any.
10501   const char *VAPassName = Hints.vectorizeAnalysisPassName();
10502   if (!VectorizeLoop && !InterleaveLoop) {
10503     // Do not vectorize or interleave the loop.
10504     ORE->emit([&]() {
10505       return OptimizationRemarkMissed(VAPassName, VecDiagMsg.first,
10506                                       L->getStartLoc(), L->getHeader())
10507              << VecDiagMsg.second;
10508     });
10509     ORE->emit([&]() {
10510       return OptimizationRemarkMissed(LV_NAME, IntDiagMsg.first,
10511                                       L->getStartLoc(), L->getHeader())
10512              << IntDiagMsg.second;
10513     });
10514     return false;
10515   } else if (!VectorizeLoop && InterleaveLoop) {
10516     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10517     ORE->emit([&]() {
10518       return OptimizationRemarkAnalysis(VAPassName, VecDiagMsg.first,
10519                                         L->getStartLoc(), L->getHeader())
10520              << VecDiagMsg.second;
10521     });
10522   } else if (VectorizeLoop && !InterleaveLoop) {
10523     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10524                       << ") in " << DebugLocStr << '\n');
10525     ORE->emit([&]() {
10526       return OptimizationRemarkAnalysis(LV_NAME, IntDiagMsg.first,
10527                                         L->getStartLoc(), L->getHeader())
10528              << IntDiagMsg.second;
10529     });
10530   } else if (VectorizeLoop && InterleaveLoop) {
10531     LLVM_DEBUG(dbgs() << "LV: Found a vectorizable loop (" << VF.Width
10532                       << ") in " << DebugLocStr << '\n');
10533     LLVM_DEBUG(dbgs() << "LV: Interleave Count is " << IC << '\n');
10534   }
10535 
10536   bool DisableRuntimeUnroll = false;
10537   MDNode *OrigLoopID = L->getLoopID();
10538   {
10539     // Optimistically generate runtime checks. Drop them if they turn out to not
10540     // be profitable. Limit the scope of Checks, so the cleanup happens
10541     // immediately after vector code generation is done.
10542     GeneratedRTChecks Checks(*PSE.getSE(), DT, LI,
10543                              F->getParent()->getDataLayout());
10544     if (!VF.Width.isScalar() || IC > 1)
10545       Checks.Create(L, *LVL.getLAI(), PSE.getUnionPredicate());
10546 
10547     using namespace ore;
10548     if (!VectorizeLoop) {
10549       assert(IC > 1 && "interleave count should not be 1 or 0");
10550       // If we decided that it is not legal to vectorize the loop, then
10551       // interleave it.
10552       InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, AC, ORE, IC, &LVL,
10553                                  &CM, BFI, PSI, Checks);
10554 
10555       VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10556       LVP.executePlan(VF.Width, IC, BestPlan, Unroller, DT);
10557 
10558       ORE->emit([&]() {
10559         return OptimizationRemark(LV_NAME, "Interleaved", L->getStartLoc(),
10560                                   L->getHeader())
10561                << "interleaved loop (interleaved count: "
10562                << NV("InterleaveCount", IC) << ")";
10563       });
10564     } else {
10565       // If we decided that it is *legal* to vectorize the loop, then do it.
10566 
10567       // Consider vectorizing the epilogue too if it's profitable.
10568       VectorizationFactor EpilogueVF =
10569           CM.selectEpilogueVectorizationFactor(VF.Width, LVP);
10570       if (EpilogueVF.Width.isVector()) {
10571 
10572         // The first pass vectorizes the main loop and creates a scalar epilogue
10573         // to be vectorized by executing the plan (potentially with a different
10574         // factor) again shortly afterwards.
10575         EpilogueLoopVectorizationInfo EPI(VF.Width, IC, EpilogueVF.Width, 1);
10576         EpilogueVectorizerMainLoop MainILV(L, PSE, LI, DT, TLI, TTI, AC, ORE,
10577                                            EPI, &LVL, &CM, BFI, PSI, Checks);
10578 
10579         VPlan &BestMainPlan = LVP.getBestPlanFor(EPI.MainLoopVF);
10580         LVP.executePlan(EPI.MainLoopVF, EPI.MainLoopUF, BestMainPlan, MainILV,
10581                         DT);
10582         ++LoopsVectorized;
10583 
10584         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10585         formLCSSARecursively(*L, *DT, LI, SE);
10586 
10587         // Second pass vectorizes the epilogue and adjusts the control flow
10588         // edges from the first pass.
10589         EPI.MainLoopVF = EPI.EpilogueVF;
10590         EPI.MainLoopUF = EPI.EpilogueUF;
10591         EpilogueVectorizerEpilogueLoop EpilogILV(L, PSE, LI, DT, TLI, TTI, AC,
10592                                                  ORE, EPI, &LVL, &CM, BFI, PSI,
10593                                                  Checks);
10594 
10595         VPlan &BestEpiPlan = LVP.getBestPlanFor(EPI.EpilogueVF);
10596         LVP.executePlan(EPI.EpilogueVF, EPI.EpilogueUF, BestEpiPlan, EpilogILV,
10597                         DT);
10598         ++LoopsEpilogueVectorized;
10599 
10600         if (!MainILV.areSafetyChecksAdded())
10601           DisableRuntimeUnroll = true;
10602       } else {
10603         InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, AC, ORE, VF.Width, IC,
10604                                &LVL, &CM, BFI, PSI, Checks);
10605 
10606         VPlan &BestPlan = LVP.getBestPlanFor(VF.Width);
10607         LVP.executePlan(VF.Width, IC, BestPlan, LB, DT);
10608         ++LoopsVectorized;
10609 
10610         // Add metadata to disable runtime unrolling a scalar loop when there
10611         // are no runtime checks about strides and memory. A scalar loop that is
10612         // rarely used is not worth unrolling.
10613         if (!LB.areSafetyChecksAdded())
10614           DisableRuntimeUnroll = true;
10615       }
10616       // Report the vectorization decision.
10617       ORE->emit([&]() {
10618         return OptimizationRemark(LV_NAME, "Vectorized", L->getStartLoc(),
10619                                   L->getHeader())
10620                << "vectorized loop (vectorization width: "
10621                << NV("VectorizationFactor", VF.Width)
10622                << ", interleaved count: " << NV("InterleaveCount", IC) << ")";
10623       });
10624     }
10625 
10626     if (ORE->allowExtraAnalysis(LV_NAME))
10627       checkMixedPrecision(L, ORE);
10628   }
10629 
10630   Optional<MDNode *> RemainderLoopID =
10631       makeFollowupLoopID(OrigLoopID, {LLVMLoopVectorizeFollowupAll,
10632                                       LLVMLoopVectorizeFollowupEpilogue});
10633   if (RemainderLoopID.hasValue()) {
10634     L->setLoopID(RemainderLoopID.getValue());
10635   } else {
10636     if (DisableRuntimeUnroll)
10637       AddRuntimeUnrollDisableMetaData(L);
10638 
10639     // Mark the loop as already vectorized to avoid vectorizing again.
10640     Hints.setAlreadyVectorized();
10641   }
10642 
10643   assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()));
10644   return true;
10645 }
10646 
10647 LoopVectorizeResult LoopVectorizePass::runImpl(
10648     Function &F, ScalarEvolution &SE_, LoopInfo &LI_, TargetTransformInfo &TTI_,
10649     DominatorTree &DT_, BlockFrequencyInfo &BFI_, TargetLibraryInfo *TLI_,
10650     DemandedBits &DB_, AAResults &AA_, AssumptionCache &AC_,
10651     std::function<const LoopAccessInfo &(Loop &)> &GetLAA_,
10652     OptimizationRemarkEmitter &ORE_, ProfileSummaryInfo *PSI_) {
10653   SE = &SE_;
10654   LI = &LI_;
10655   TTI = &TTI_;
10656   DT = &DT_;
10657   BFI = &BFI_;
10658   TLI = TLI_;
10659   AA = &AA_;
10660   AC = &AC_;
10661   GetLAA = &GetLAA_;
10662   DB = &DB_;
10663   ORE = &ORE_;
10664   PSI = PSI_;
10665 
10666   // Don't attempt if
10667   // 1. the target claims to have no vector registers, and
10668   // 2. interleaving won't help ILP.
10669   //
10670   // The second condition is necessary because, even if the target has no
10671   // vector registers, loop vectorization may still enable scalar
10672   // interleaving.
10673   if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true)) &&
10674       TTI->getMaxInterleaveFactor(1) < 2)
10675     return LoopVectorizeResult(false, false);
10676 
10677   bool Changed = false, CFGChanged = false;
10678 
10679   // The vectorizer requires loops to be in simplified form.
10680   // Since simplification may add new inner loops, it has to run before the
10681   // legality and profitability checks. This means running the loop vectorizer
10682   // will simplify all loops, regardless of whether anything ends up being
10683   // vectorized.
10684   for (auto &L : *LI)
10685     Changed |= CFGChanged |=
10686         simplifyLoop(L, DT, LI, SE, AC, nullptr, false /* PreserveLCSSA */);
10687 
10688   // Build up a worklist of inner-loops to vectorize. This is necessary as
10689   // the act of vectorizing or partially unrolling a loop creates new loops
10690   // and can invalidate iterators across the loops.
10691   SmallVector<Loop *, 8> Worklist;
10692 
10693   for (Loop *L : *LI)
10694     collectSupportedLoops(*L, LI, ORE, Worklist);
10695 
10696   LoopsAnalyzed += Worklist.size();
10697 
10698   // Now walk the identified inner loops.
10699   while (!Worklist.empty()) {
10700     Loop *L = Worklist.pop_back_val();
10701 
10702     // For the inner loops we actually process, form LCSSA to simplify the
10703     // transform.
10704     Changed |= formLCSSARecursively(*L, *DT, LI, SE);
10705 
10706     Changed |= CFGChanged |= processLoop(L);
10707   }
10708 
10709   // Process each loop nest in the function.
10710   return LoopVectorizeResult(Changed, CFGChanged);
10711 }
10712 
10713 PreservedAnalyses LoopVectorizePass::run(Function &F,
10714                                          FunctionAnalysisManager &AM) {
10715     auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
10716     auto &LI = AM.getResult<LoopAnalysis>(F);
10717     auto &TTI = AM.getResult<TargetIRAnalysis>(F);
10718     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
10719     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
10720     auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
10721     auto &AA = AM.getResult<AAManager>(F);
10722     auto &AC = AM.getResult<AssumptionAnalysis>(F);
10723     auto &DB = AM.getResult<DemandedBitsAnalysis>(F);
10724     auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
10725 
10726     auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
10727     std::function<const LoopAccessInfo &(Loop &)> GetLAA =
10728         [&](Loop &L) -> const LoopAccessInfo & {
10729       LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
10730                                         TLI, TTI, nullptr, nullptr, nullptr};
10731       return LAM.getResult<LoopAccessAnalysis>(L, AR);
10732     };
10733     auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
10734     ProfileSummaryInfo *PSI =
10735         MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
10736     LoopVectorizeResult Result =
10737         runImpl(F, SE, LI, TTI, DT, BFI, &TLI, DB, AA, AC, GetLAA, ORE, PSI);
10738     if (!Result.MadeAnyChange)
10739       return PreservedAnalyses::all();
10740     PreservedAnalyses PA;
10741 
10742     // We currently do not preserve loopinfo/dominator analyses with outer loop
10743     // vectorization. Until this is addressed, mark these analyses as preserved
10744     // only for non-VPlan-native path.
10745     // TODO: Preserve Loop and Dominator analyses for VPlan-native path.
10746     if (!EnableVPlanNativePath) {
10747       PA.preserve<LoopAnalysis>();
10748       PA.preserve<DominatorTreeAnalysis>();
10749     }
10750     if (!Result.MadeCFGChange)
10751       PA.preserveSet<CFGAnalyses>();
10752     return PA;
10753 }
10754 
10755 void LoopVectorizePass::printPipeline(
10756     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
10757   static_cast<PassInfoMixin<LoopVectorizePass> *>(this)->printPipeline(
10758       OS, MapClassName2PassName);
10759 
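        // Assuming the registered pass name is "loop-vectorize", the printed form
        // looks like:
        //   loop-vectorize<no-interleave-forced-only;no-vectorize-forced-only>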
10760   OS << "<";
10761   OS << (InterleaveOnlyWhenForced ? "" : "no-") << "interleave-forced-only;";
10762   OS << (VectorizeOnlyWhenForced ? "" : "no-") << "vectorize-forced-only;";
10763   OS << ">";
10764 }
10765