//===- LoopVectorizationLegality.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides loop vectorization legality analysis. Original code
// resided in LoopVectorize.cpp for a long time.
//
// At this point, it is implemented as a utility class, not as an analysis
// pass. It should be easy to create an analysis pass around it if there
// is a need (but D45420 needs to happen first).
//

#include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include "llvm/Transforms/Vectorize/LoopVectorize.h"

using namespace llvm;
using namespace PatternMatch;

#define LV_NAME "loop-vectorize"
#define DEBUG_TYPE LV_NAME

static cl::opt<bool>
    EnableIfConversion("enable-if-conversion", cl::init(true), cl::Hidden,
                       cl::desc("Enable if-conversion during vectorization."));

static cl::opt<bool>
AllowStridedPointerIVs("lv-strided-pointer-ivs", cl::init(false), cl::Hidden,
                       cl::desc("Enable recognition of non-constant strided "
                                "pointer induction variables."));

namespace llvm {
cl::opt<bool>
    HintsAllowReordering("hints-allow-reordering", cl::init(true), cl::Hidden,
                         cl::desc("Allow enabling loop hints to reorder "
                                  "FP operations during vectorization."));
} // namespace llvm

// TODO: Move size-based thresholds out of legality checking, make cost based
// decisions instead of hard thresholds.
static cl::opt<unsigned> VectorizeSCEVCheckThreshold(
    "vectorize-scev-check-threshold", cl::init(16), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed."));

static cl::opt<unsigned> PragmaVectorizeSCEVCheckThreshold(
    "pragma-vectorize-scev-check-threshold", cl::init(128), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed with a "
             "vectorize(enable) pragma"));

static cl::opt<LoopVectorizeHints::ScalableForceKind>
    ForceScalableVectorization(
        "scalable-vectorization", cl::init(LoopVectorizeHints::SK_Unspecified),
        cl::Hidden,
        cl::desc("Control whether the compiler can use scalable vectors to "
                 "vectorize a loop"),
        cl::values(
            clEnumValN(LoopVectorizeHints::SK_FixedWidthOnly, "off",
                       "Scalable vectorization is disabled."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "preferred",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive."),
            clEnumValN(
                LoopVectorizeHints::SK_PreferScalable, "on",
                "Scalable vectorization is available and favored when the "
                "cost is inconclusive.")));

/// Maximum vectorization interleave count.
static const unsigned MaxInterleaveFactor = 16;

namespace llvm {

bool LoopVectorizeHints::Hint::validate(unsigned Val) {
  switch (Kind) {
  case HK_WIDTH:
    return isPowerOf2_32(Val) && Val <= VectorizerParams::MaxVectorWidth;
  case HK_INTERLEAVE:
    return isPowerOf2_32(Val) && Val <= MaxInterleaveFactor;
  case HK_FORCE:
    return (Val <= 1);
  case HK_ISVECTORIZED:
  case HK_PREDICATE:
  case HK_SCALABLE:
    return (Val == 0 || Val == 1);
  }
  return false;
}

LoopVectorizeHints::LoopVectorizeHints(const Loop *L,
                                       bool InterleaveOnlyWhenForced,
                                       OptimizationRemarkEmitter &ORE,
                                       const TargetTransformInfo *TTI)
    : Width("vectorize.width", VectorizerParams::VectorizationFactor, HK_WIDTH),
      Interleave("interleave.count", InterleaveOnlyWhenForced, HK_INTERLEAVE),
      Force("vectorize.enable", FK_Undefined, HK_FORCE),
      IsVectorized("isvectorized", 0, HK_ISVECTORIZED),
      Predicate("vectorize.predicate.enable", FK_Undefined, HK_PREDICATE),
      Scalable("vectorize.scalable.enable", SK_Unspecified, HK_SCALABLE),
      TheLoop(L), ORE(ORE) {
  // Populate values with existing loop metadata.
  getHintsFromMetadata();

  // force-vector-interleave overrides DisableInterleaving.
  if (VectorizerParams::isInterleaveForced())
    Interleave.Value = VectorizerParams::VectorizationInterleave;

  // If the metadata doesn't explicitly specify whether to enable scalable
  // vectorization, then decide based on the following criteria (increasing
  // level of priority):
  //  - Target default
  //  - Metadata width
  //  - Force option (always overrides)
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified) {
    if (TTI)
      Scalable.Value = TTI->enableScalableVectorization() ? SK_PreferScalable
                                                          : SK_FixedWidthOnly;

    if (Width.Value)
      // If the width is set, but the metadata says nothing about the scalable
      // property, then assume it concerns only a fixed-width UserVF.
      // If width is not set, the flag takes precedence.
      Scalable.Value = SK_FixedWidthOnly;
  }
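  // For example, a 'vectorize_width(4)' hint with no scalable hint is treated
  // as a request for a fixed-width VF of 4, regardless of the target default.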

  // If the flag is set to force any use of scalable vectors, override the loop
  // hints.
  if (ForceScalableVectorization.getValue() !=
      LoopVectorizeHints::SK_Unspecified)
    Scalable.Value = ForceScalableVectorization.getValue();

  // Scalable vectorization is disabled if no preference is specified.
  if ((LoopVectorizeHints::ScalableForceKind)Scalable.Value == SK_Unspecified)
    Scalable.Value = SK_FixedWidthOnly;

  if (IsVectorized.Value != 1)
    // If the vectorization width and interleaving count are both 1 then
    // consider the loop to have been already vectorized because there's
    // nothing more that we can do.
    IsVectorized.Value =
        getWidth() == ElementCount::getFixed(1) && getInterleave() == 1;
  LLVM_DEBUG(if (InterleaveOnlyWhenForced && getInterleave() == 1) dbgs()
             << "LV: Interleaving disabled by the pass manager\n");
}

void LoopVectorizeHints::setAlreadyVectorized() {
  LLVMContext &Context = TheLoop->getHeader()->getContext();

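  // Replace the existing vectorize/interleave hints with metadata marking the
  // loop as transformed, e.g.:
  //   !{!"llvm.loop.isvectorized", i32 1}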
  MDNode *IsVectorizedMD = MDNode::get(
      Context,
      {MDString::get(Context, "llvm.loop.isvectorized"),
       ConstantAsMetadata::get(ConstantInt::get(Context, APInt(32, 1)))});
  MDNode *LoopID = TheLoop->getLoopID();
  MDNode *NewLoopID =
      makePostTransformationMetadata(Context, LoopID,
                                     {Twine(Prefix(), "vectorize.").str(),
                                      Twine(Prefix(), "interleave.").str()},
                                     {IsVectorizedMD});
  TheLoop->setLoopID(NewLoopID);

  // Update internal cache.
  IsVectorized.Value = 1;
}

bool LoopVectorizeHints::allowVectorization(
    Function *F, Loop *L, bool VectorizeOnlyWhenForced) const {
  if (getForce() == LoopVectorizeHints::FK_Disabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: #pragma vectorize disable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (VectorizeOnlyWhenForced && getForce() != LoopVectorizeHints::FK_Enabled) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: No #pragma vectorize enable.\n");
    emitRemarkWithHints();
    return false;
  }

  if (getIsVectorized() == 1) {
    LLVM_DEBUG(dbgs() << "LV: Not vectorizing: Disabled/already vectorized.\n");
    // FIXME: Add interleave.disable metadata. This will allow
    // vectorize.disable to be used without disabling the pass, and remarks
    // to differentiate between disabled vectorization and a width of 1.
    ORE.emit([&]() {
      return OptimizationRemarkAnalysis(vectorizeAnalysisPassName(),
                                        "AllDisabled", L->getStartLoc(),
                                        L->getHeader())
             << "loop not vectorized: vectorization and interleaving are "
                "explicitly disabled, or the loop has already been "
                "vectorized";
    });
    return false;
  }

  return true;
}

void LoopVectorizeHints::emitRemarkWithHints() const {
  using namespace ore;

  ORE.emit([&]() {
    if (Force.Value == LoopVectorizeHints::FK_Disabled)
      return OptimizationRemarkMissed(LV_NAME, "MissedExplicitlyDisabled",
                                      TheLoop->getStartLoc(),
                                      TheLoop->getHeader())
             << "loop not vectorized: vectorization is explicitly disabled";

    OptimizationRemarkMissed R(LV_NAME, "MissedDetails", TheLoop->getStartLoc(),
                               TheLoop->getHeader());
    R << "loop not vectorized";
    if (Force.Value == LoopVectorizeHints::FK_Enabled) {
      R << " (Force=" << NV("Force", true);
      if (Width.Value != 0)
        R << ", Vector Width=" << NV("VectorWidth", getWidth());
      if (getInterleave() != 0)
        R << ", Interleave Count=" << NV("InterleaveCount", getInterleave());
      R << ")";
    }
    return R;
  });
}

const char *LoopVectorizeHints::vectorizeAnalysisPassName() const {
  if (getWidth() == ElementCount::getFixed(1))
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Disabled)
    return LV_NAME;
  if (getForce() == LoopVectorizeHints::FK_Undefined && getWidth().isZero())
    return LV_NAME;
  return OptimizationRemarkAnalysis::AlwaysPrint;
}

bool LoopVectorizeHints::allowReordering() const {
  // Allow the vectorizer to change the order of operations if loop hints
  // that enable vectorization are provided.
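  // For example, '#pragma clang loop vectorize(enable)' or an explicit width
  // of at least 2 signals that reassociating FP reductions is acceptable here
  // (unless -hints-allow-reordering=false).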
  ElementCount EC = getWidth();
  return HintsAllowReordering &&
         (getForce() == LoopVectorizeHints::FK_Enabled ||
          EC.getKnownMinValue() > 1);
}

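// Loop hints are read from '!llvm.loop' metadata, which looks like, e.g.:
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.vectorize.width", i32 4}
//   !2 = !{!"llvm.loop.vectorize.enable", i1 1}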
void LoopVectorizeHints::getHintsFromMetadata() {
  MDNode *LoopID = TheLoop->getLoopID();
  if (!LoopID)
    return;

  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (const MDOperand &MDO : llvm::drop_begin(LoopID->operands())) {
    const MDString *S = nullptr;
    SmallVector<Metadata *, 4> Args;

    // The expected hint is either an MDString or an MDNode with the first
    // operand an MDString.
    if (const MDNode *MD = dyn_cast<MDNode>(MDO)) {
      if (MD->getNumOperands() == 0)
        continue;
      S = dyn_cast<MDString>(MD->getOperand(0));
      for (unsigned Idx = 1; Idx < MD->getNumOperands(); ++Idx)
        Args.push_back(MD->getOperand(Idx));
    } else {
      S = dyn_cast<MDString>(MDO);
      assert(Args.size() == 0 && "too many arguments for MDString");
    }

    if (!S)
      continue;

    // Check if the hint starts with the loop metadata prefix.
    StringRef Name = S->getString();
    if (Args.size() == 1)
      setHint(Name, Args[0]);
  }
}

void LoopVectorizeHints::setHint(StringRef Name, Metadata *Arg) {
  if (!Name.starts_with(Prefix()))
    return;
  Name = Name.substr(Prefix().size(), StringRef::npos);

  const ConstantInt *C = mdconst::dyn_extract<ConstantInt>(Arg);
  if (!C)
    return;
  unsigned Val = C->getZExtValue();

  Hint *Hints[] = {&Width,        &Interleave, &Force,
                   &IsVectorized, &Predicate,  &Scalable};
  for (auto *H : Hints) {
    if (Name == H->Name) {
      if (H->validate(Val))
        H->Value = Val;
      else
        LLVM_DEBUG(dbgs() << "LV: ignoring invalid hint '" << Name << "'\n");
      break;
    }
  }
}

// Return true if the inner loop \p Lp is uniform with regard to the outer loop
// \p OuterLp (i.e., if the outer loop is vectorized, all the vector lanes
// executing the inner loop will execute the same iterations). This check is
// very constrained for now but it will be relaxed in the future. \p Lp is
// considered uniform if it meets all the following conditions:
//   1) it has a canonical IV (starting from 0 and with stride 1),
//   2) its latch terminator is a conditional branch and,
//   3) its latch condition is a compare instruction whose operands are the
//      canonical IV and an OuterLp invariant.
// This check doesn't take into account the uniformity of other conditions not
// related to the loop latch because they don't affect the loop uniformity.
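//
// For example, when OuterLp is the 'i' loop below, the inner 'j' loop is
// uniform: 'j' is a canonical IV and its latch condition compares the IV
// update against the OuterLp-invariant bound 'M':
//   for (int i = 0; i < N; ++i)   // OuterLp
//     for (int j = 0; j < M; ++j) // Lp
//       ...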
//
// NOTE: We decided to keep all these checks and their associated documentation
// together so that we can easily have a picture of the current supported loop
// nests. However, some of the current checks don't depend on \p OuterLp and
// would be redundantly executed for each \p Lp if we invoked this function for
// different candidate outer loops. This is not the case for now because we
// don't currently have the infrastructure to evaluate multiple candidate outer
// loops and \p OuterLp will be a fixed parameter while we only support explicit
// outer loop vectorization. It's also very likely that these checks go away
// before introducing the aforementioned infrastructure. However, if this is not
// the case, we should move the \p OuterLp independent checks to a separate
// function that is only executed once for each \p Lp.
static bool isUniformLoop(Loop *Lp, Loop *OuterLp) {
  assert(Lp->getLoopLatch() && "Expected loop with a single latch.");

  // If Lp is the outer loop, it's uniform by definition.
  if (Lp == OuterLp)
    return true;
  assert(OuterLp->contains(Lp) && "OuterLp must contain Lp.");

  // 1.
  PHINode *IV = Lp->getCanonicalInductionVariable();
  if (!IV) {
    LLVM_DEBUG(dbgs() << "LV: Canonical IV not found.\n");
    return false;
  }

  // 2.
  BasicBlock *Latch = Lp->getLoopLatch();
  auto *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    LLVM_DEBUG(dbgs() << "LV: Unsupported loop latch branch.\n");
    return false;
  }

  // 3.
  auto *LatchCmp = dyn_cast<CmpInst>(LatchBr->getCondition());
  if (!LatchCmp) {
    LLVM_DEBUG(
        dbgs() << "LV: Loop latch condition is not a compare instruction.\n");
    return false;
  }

  Value *CondOp0 = LatchCmp->getOperand(0);
  Value *CondOp1 = LatchCmp->getOperand(1);
  Value *IVUpdate = IV->getIncomingValueForBlock(Latch);
  if (!(CondOp0 == IVUpdate && OuterLp->isLoopInvariant(CondOp1)) &&
      !(CondOp1 == IVUpdate && OuterLp->isLoopInvariant(CondOp0))) {
    LLVM_DEBUG(dbgs() << "LV: Loop latch condition is not uniform.\n");
    return false;
  }

  return true;
}

// Return true if \p Lp and all its nested loops are uniform with regard to \p
// OuterLp.
static bool isUniformLoopNest(Loop *Lp, Loop *OuterLp) {
  if (!isUniformLoop(Lp, OuterLp))
    return false;

  // Check if nested loops are uniform.
  for (Loop *SubLp : *Lp)
    if (!isUniformLoopNest(SubLp, OuterLp))
      return false;

  return true;
}

static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
  if (Ty->isPointerTy())
    return DL.getIntPtrType(Ty);

  // It is possible that chars or shorts overflow when we ask for the loop's
  // trip count; work around this by changing the type size.
  if (Ty->getScalarSizeInBits() < 32)
    return Type::getInt32Ty(Ty->getContext());

  return Ty;
}

static Type *getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  Ty0 = convertPointerToIntegerType(DL, Ty0);
  Ty1 = convertPointerToIntegerType(DL, Ty1);
  if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
    return Ty0;
  return Ty1;
}

/// Check that the instruction has outside loop users and is not an
/// identified reduction variable.
static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
                               SmallPtrSetImpl<Value *> &AllowedExit) {
  // Reductions, inductions and non-header phis are allowed to have exit
  // users. All other instructions must not have external users.
  if (!AllowedExit.count(Inst))
    // Check that all of the users of the instruction are inside the loop.
    for (User *U : Inst->users()) {
      Instruction *UI = cast<Instruction>(U);
      // This user may be a reduction exit value.
      if (!TheLoop->contains(UI)) {
        LLVM_DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
        return true;
      }
    }
  return false;
}

/// Returns true if A and B have the same pointer operand or the same SCEV
/// address.
static bool storeToSameAddress(ScalarEvolution *SE, StoreInst *A,
                               StoreInst *B) {
  // Compare store instructions.
  if (A == B)
    return true;

  // Otherwise compare pointer operands.
  Value *APtr = A->getPointerOperand();
  Value *BPtr = B->getPointerOperand();
  if (APtr == BPtr)
    return true;

  // Otherwise compare address SCEVs.
  return SE->getSCEV(APtr) == SE->getSCEV(BPtr);
}

int LoopVectorizationLegality::isConsecutivePtr(Type *AccessTy,
                                                Value *Ptr) const {
  // FIXME: Currently, the set of symbolic strides is sometimes queried before
  // it's collected.  This happens from canVectorizeWithIfConvert, when the
  // pointer is checked to reference consecutive elements suitable for a
  // masked access.
  const auto &Strides =
    LAI ? LAI->getSymbolicStrides() : DenseMap<Value *, const SCEV *>();

  Function *F = TheLoop->getHeader()->getParent();
  bool OptForSize = F->hasOptSize() ||
                    llvm::shouldOptimizeForSize(TheLoop->getHeader(), PSI, BFI,
                                                PGSOQueryType::IRPass);
  bool CanAddPredicate = !OptForSize;
  int Stride = getPtrStride(PSE, AccessTy, Ptr, TheLoop, Strides,
                            CanAddPredicate, false).value_or(0);
  if (Stride == 1 || Stride == -1)
    return Stride;
  return 0;
}

bool LoopVectorizationLegality::isInvariant(Value *V) const {
  return LAI->isInvariant(V);
}

namespace {
/// A rewriter to build the SCEVs for each of the VF lanes in the expected
/// vectorized loop, which can then be compared to detect their uniformity.
/// This is done by replacing the AddRec SCEVs of the original scalar loop
/// (TheLoop) with new AddRecs where the step is multiplied by StepMultiplier
/// and Offset * Step is added. Also checks if all sub-expressions are
/// analyzable w.r.t. uniformity.
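///
/// For example, for VF = 4 the scalar AddRec {0,+,1} is rewritten to {0,+,4}
/// for lane 0 and to {I,+,4} for lane I (I = 1..3).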
class SCEVAddRecForUniformityRewriter
    : public SCEVRewriteVisitor<SCEVAddRecForUniformityRewriter> {
  /// Multiplier to be applied to the step of AddRecs in TheLoop.
  unsigned StepMultiplier;

  /// Offset to be added to the AddRecs in TheLoop.
  unsigned Offset;

  /// Loop for which to rewrite AddRecs.
  Loop *TheLoop;

  /// Set if any sub-expression is not analyzable w.r.t. uniformity.
  bool CannotAnalyze = false;

  bool canAnalyze() const { return !CannotAnalyze; }

public:
  SCEVAddRecForUniformityRewriter(ScalarEvolution &SE, unsigned StepMultiplier,
                                  unsigned Offset, Loop *TheLoop)
      : SCEVRewriteVisitor(SE), StepMultiplier(StepMultiplier), Offset(Offset),
        TheLoop(TheLoop) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    assert(Expr->getLoop() == TheLoop &&
           "addrec outside of TheLoop must be invariant and should have been "
           "handled earlier");
    // Build a new AddRec by multiplying the step by StepMultiplier and
    // incrementing the start by Offset * step.
    Type *Ty = Expr->getType();
    const SCEV *Step = Expr->getStepRecurrence(SE);
    if (!SE.isLoopInvariant(Step, TheLoop)) {
      CannotAnalyze = true;
      return Expr;
    }
    const SCEV *NewStep =
        SE.getMulExpr(Step, SE.getConstant(Ty, StepMultiplier));
    const SCEV *ScaledOffset = SE.getMulExpr(Step, SE.getConstant(Ty, Offset));
    const SCEV *NewStart = SE.getAddExpr(Expr->getStart(), ScaledOffset);
    return SE.getAddRecExpr(NewStart, NewStep, TheLoop, SCEV::FlagAnyWrap);
  }

  const SCEV *visit(const SCEV *S) {
    if (CannotAnalyze || SE.isLoopInvariant(S, TheLoop))
      return S;
    return SCEVRewriteVisitor<SCEVAddRecForUniformityRewriter>::visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *S) {
    if (SE.isLoopInvariant(S, TheLoop))
      return S;
    // The value could vary across iterations.
    CannotAnalyze = true;
    return S;
  }

  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *S) {
    // Could not analyze the expression.
    CannotAnalyze = true;
    return S;
  }

  static const SCEV *rewrite(const SCEV *S, ScalarEvolution &SE,
                             unsigned StepMultiplier, unsigned Offset,
                             Loop *TheLoop) {
    // Bail out if the expression does not contain a UDiv expression.
    // Uniform values which are not loop invariant require operations to strip
    // out the lowest bits. For now just look for UDivs and use this to avoid
    // rewriting UDiv-free expressions for other lanes to limit compile time.
    if (!SCEVExprContains(S,
                          [](const SCEV *S) { return isa<SCEVUDivExpr>(S); }))
      return SE.getCouldNotCompute();

    SCEVAddRecForUniformityRewriter Rewriter(SE, StepMultiplier, Offset,
                                             TheLoop);
    const SCEV *Result = Rewriter.visit(S);

    if (Rewriter.canAnalyze())
      return Result;
    return SE.getCouldNotCompute();
  }
};

} // namespace

bool LoopVectorizationLegality::isUniform(Value *V, ElementCount VF) const {
  if (isInvariant(V))
    return true;
  if (VF.isScalable())
    return false;
  if (VF.isScalar())
    return true;

  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  auto *SE = PSE.getSE();
  if (!SE->isSCEVable(V->getType()))
    return false;
  const SCEV *S = SE->getSCEV(V);

  // Rewrite AddRecs in TheLoop to step by VF and check if the expression for
  // lane 0 matches the expressions for all other lanes.
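  // For example, with VF = 2 and an IV that starts at 0 and steps by 1, a
  // value like (iv /u 2) takes the same value on both lanes of each vector
  // iteration, so its per-lane rewrites are expected to compare equal.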
  unsigned FixedVF = VF.getKnownMinValue();
  const SCEV *FirstLaneExpr =
      SCEVAddRecForUniformityRewriter::rewrite(S, *SE, FixedVF, 0, TheLoop);
  if (isa<SCEVCouldNotCompute>(FirstLaneExpr))
    return false;

  // Make sure the expressions for lanes FixedVF-1..1 match the expression for
  // lane 0. We check lanes in reverse order to save compile time, as checking
  // the last lane first is frequently sufficient to rule out uniformity.
  return all_of(reverse(seq<unsigned>(1, FixedVF)), [&](unsigned I) {
    const SCEV *IthLaneExpr =
        SCEVAddRecForUniformityRewriter::rewrite(S, *SE, FixedVF, I, TheLoop);
    return FirstLaneExpr == IthLaneExpr;
  });
}

bool LoopVectorizationLegality::isUniformMemOp(Instruction &I,
                                               ElementCount VF) const {
  Value *Ptr = getLoadStorePointerOperand(&I);
  if (!Ptr)
    return false;
  // Note: There's nothing inherent which prevents predicated loads and
  // stores from being uniform.  The current lowering simply doesn't handle
  // it; in particular, the cost model distinguishes scatter/gather from
  // scalar w/predication, and we currently rely on the scalar path.
  return isUniform(Ptr, VF) && !blockNeedsPredication(I.getParent());
}

bool LoopVectorizationLegality::canVectorizeOuterLoop() {
  assert(!TheLoop->isInnermost() && "Expected an outer loop.");
  // Store the result and return it at the end instead of exiting early, in
  // case allowExtraAnalysis is used to report multiple reasons for not
  // vectorizing.
  bool Result = true;
  bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);

  for (BasicBlock *BB : TheLoop->blocks()) {
    // Check whether the BB terminator is a BranchInst. Any other terminator is
    // not supported yet.
    auto *Br = dyn_cast<BranchInst>(BB->getTerminator());
    if (!Br) {
      reportVectorizationFailure("Unsupported basic block terminator",
          "loop control flow is not understood by vectorizer",
          "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }

    // Check whether the BranchInst is a supported one. Only unconditional
    // branches, conditional branches with an outer loop invariant condition or
    // backedges are supported.
    // FIXME: We skip these checks when VPlan predication is enabled as we
    // want to allow divergent branches. This whole check will be removed
    // once VPlan predication is on by default.
    if (Br && Br->isConditional() &&
        !TheLoop->isLoopInvariant(Br->getCondition()) &&
        !LI->isLoopHeader(Br->getSuccessor(0)) &&
        !LI->isLoopHeader(Br->getSuccessor(1))) {
      reportVectorizationFailure("Unsupported conditional branch",
          "loop control flow is not understood by vectorizer",
          "CFGNotUnderstood", ORE, TheLoop);
      if (DoExtraAnalysis)
        Result = false;
      else
        return false;
    }
  }

  // Check whether inner loops are uniform. At this point, we only support
  // simple outer-loop scenarios with uniform nested loops.
  if (!isUniformLoopNest(TheLoop /*loop nest*/,
                         TheLoop /*context outer loop*/)) {
    reportVectorizationFailure("Outer loop contains divergent loops",
        "loop control flow is not understood by vectorizer",
        "CFGNotUnderstood", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  // Check whether we are able to set up outer loop induction.
  if (!setupOuterLoopInductions()) {
    reportVectorizationFailure("Unsupported outer loop Phi(s)",
                               "Unsupported outer loop Phi(s)",
                               "UnsupportedPhi", ORE, TheLoop);
    if (DoExtraAnalysis)
      Result = false;
    else
      return false;
  }

  return Result;
}

void LoopVectorizationLegality::addInductionPhi(
    PHINode *Phi, const InductionDescriptor &ID,
    SmallPtrSetImpl<Value *> &AllowedExit) {
  Inductions[Phi] = ID;

  // In case this induction also comes with casts that we know we can ignore
  // in the vectorized loop body, record them here. All casts could be recorded
  // here for ignoring, but it suffices to record only the first (as it is the
  // only one that may be used outside the cast sequence).
  const SmallVectorImpl<Instruction *> &Casts = ID.getCastInsts();
  if (!Casts.empty())
    InductionCastsToIgnore.insert(*Casts.begin());

  Type *PhiTy = Phi->getType();
  const DataLayout &DL = Phi->getDataLayout();

  // Get the widest type.
  if (!PhiTy->isFloatingPointTy()) {
    if (!WidestIndTy)
      WidestIndTy = convertPointerToIntegerType(DL, PhiTy);
    else
      WidestIndTy = getWiderType(DL, PhiTy, WidestIndTy);
  }

  // Int inductions are special because we only allow one IV.
  if (ID.getKind() == InductionDescriptor::IK_IntInduction &&
      ID.getConstIntStepValue() && ID.getConstIntStepValue()->isOne() &&
      isa<Constant>(ID.getStartValue()) &&
      cast<Constant>(ID.getStartValue())->isNullValue()) {

    // Use the phi node with the widest type as induction. Use the last
    // one if there are multiple (no good reason for doing this other
    // than it is expedient). We've checked that it begins at zero and
    // steps by one, so this is a canonical induction variable.
    if (!PrimaryInduction || PhiTy == WidestIndTy)
      PrimaryInduction = Phi;
  }

  // Both the PHI node itself, and the "post-increment" value feeding
  // back into the PHI node may have external users.
  // We can allow those uses, except if the SCEVs we have for them rely
  // on predicates that only hold within the loop, since allowing the exit
  // currently means re-using this SCEV outside the loop (see PR33706 for more
  // details).
  if (PSE.getPredicate().isAlwaysTrue()) {
    AllowedExit.insert(Phi);
    AllowedExit.insert(Phi->getIncomingValueForBlock(TheLoop->getLoopLatch()));
  }

  LLVM_DEBUG(dbgs() << "LV: Found an induction variable.\n");
}

bool LoopVectorizationLegality::setupOuterLoopInductions() {
  BasicBlock *Header = TheLoop->getHeader();

  // Returns true if a given Phi is a supported induction.
  auto IsSupportedPhi = [&](PHINode &Phi) -> bool {
    InductionDescriptor ID;
    if (InductionDescriptor::isInductionPHI(&Phi, TheLoop, PSE, ID) &&
        ID.getKind() == InductionDescriptor::IK_IntInduction) {
      addInductionPhi(&Phi, ID, AllowedExit);
      return true;
    }
    // Bail out for any Phi in the outer loop header that is not a supported
    // induction.
    LLVM_DEBUG(
        dbgs() << "LV: Found unsupported PHI for outer loop vectorization.\n");
    return false;
  };

  return llvm::all_of(Header->phis(), IsSupportedPhi);
}

/// Checks if a function is scalarizable according to the TLI, in
/// the sense that it should be vectorized and then expanded in
/// multiple scalar calls. This is represented in the
/// TLI via mappings that do not specify a vector name, as in the
/// following example:
///
///    const VecDesc VecIntrinsics[] = {
///      {"llvm.phx.abs.i32", "", 4}
///    };
static bool isTLIScalarize(const TargetLibraryInfo &TLI, const CallInst &CI) {
  const StringRef ScalarName = CI.getCalledFunction()->getName();
  bool Scalarize = TLI.isFunctionVectorizable(ScalarName);
  // Check that all known VFs are not associated with a vector
  // function, i.e. the vector name is empty.
  if (Scalarize) {
    ElementCount WidestFixedVF, WidestScalableVF;
    TLI.getWidestVF(ScalarName, WidestFixedVF, WidestScalableVF);
    for (ElementCount VF = ElementCount::getFixed(2);
         ElementCount::isKnownLE(VF, WidestFixedVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    for (ElementCount VF = ElementCount::getScalable(1);
         ElementCount::isKnownLE(VF, WidestScalableVF); VF *= 2)
      Scalarize &= !TLI.isFunctionVectorizable(ScalarName, VF);
    assert((WidestScalableVF.isZero() || !Scalarize) &&
           "Caller may decide to scalarize a variant using a scalable VF");
  }
  return Scalarize;
}

bool LoopVectorizationLegality::canVectorizeInstrs() {
  BasicBlock *Header = TheLoop->getHeader();

  // For each block in the loop.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for hazards.
    for (Instruction &I : *BB) {
      if (auto *Phi = dyn_cast<PHINode>(&I)) {
        Type *PhiTy = Phi->getType();
        // Check that this PHI type is allowed.
        if (!PhiTy->isIntegerTy() && !PhiTy->isFloatingPointTy() &&
            !PhiTy->isPointerTy()) {
          reportVectorizationFailure("Found a non-int non-pointer PHI",
                                     "loop control flow is not understood by vectorizer",
                                     "CFGNotUnderstood", ORE, TheLoop);
          return false;
        }

        // If this PHINode is not in the header block, then we know that we
        // can convert it to a select during if-conversion. No need to check
        // if the PHIs in this block are induction or reduction variables.
        if (BB != Header) {
          // Non-header phi nodes that have outside uses can be vectorized. Add
          // them to the list of allowed exits.
          // Unsafe cyclic dependencies with header phis are identified during
          // legalization for reduction, induction and fixed order
          // recurrences.
          AllowedExit.insert(&I);
          continue;
        }

        // We only allow if-converted PHIs with exactly two incoming values.
        if (Phi->getNumIncomingValues() != 2) {
          reportVectorizationFailure("Found an invalid PHI",
              "loop control flow is not understood by vectorizer",
              "CFGNotUnderstood", ORE, TheLoop, Phi);
          return false;
        }

        RecurrenceDescriptor RedDes;
        if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes, DB, AC,
                                                 DT, PSE.getSE())) {
          Requirements->addExactFPMathInst(RedDes.getExactFPMathInst());
          AllowedExit.insert(RedDes.getLoopExitInstr());
          Reductions[Phi] = RedDes;
          continue;
        }

        // We prevent matching non-constant strided pointer IVs to preserve
        // historical vectorizer behavior after a generalization of the
        // IVDescriptor code. The intent is to remove this check, but we
        // have to fix issues around code quality for such loops first.
        auto IsDisallowedStridedPointerInduction =
            [](const InductionDescriptor &ID) {
              if (AllowStridedPointerIVs)
                return false;
              return ID.getKind() == InductionDescriptor::IK_PtrInduction &&
                     ID.getConstIntStepValue() == nullptr;
            };

        // TODO: Instead of recording the AllowedExit, it would be good to
        // record the complementary set: NotAllowedExit. These include (but may
        // not be limited to):
        // 1. Reduction phis as they represent the one-before-last value, which
        // is not available when vectorized
        // 2. Induction phis and increment when SCEV predicates cannot be used
        // outside the loop - see addInductionPhi
        // 3. Non-Phis with outside uses when SCEV predicates cannot be used
        // outside the loop - see call to hasOutsideLoopUser in the non-phi
        // handling below
        // 4. FixedOrderRecurrence phis that can possibly be handled by
        // extraction.
        // By recording these, we can then reason about ways to vectorize each
        // of these NotAllowedExit.
        InductionDescriptor ID;
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID) &&
            !IsDisallowedStridedPointerInduction(ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          Requirements->addExactFPMathInst(ID.getExactFPMathInst());
          continue;
        }

        if (RecurrenceDescriptor::isFixedOrderRecurrence(Phi, TheLoop, DT)) {
          AllowedExit.insert(Phi);
          FixedOrderRecurrences.insert(Phi);
          continue;
        }

        // As a last resort, coerce the PHI to an AddRec expression
        // and retry classifying it as an induction PHI.
        if (InductionDescriptor::isInductionPHI(Phi, TheLoop, PSE, ID, true) &&
            !IsDisallowedStridedPointerInduction(ID)) {
          addInductionPhi(Phi, ID, AllowedExit);
          continue;
        }

        reportVectorizationFailure("Found an unidentified PHI",
            "value that could not be identified as "
            "reduction is used outside the loop",
            "NonReductionValueUsedOutsideLoop", ORE, TheLoop, Phi);
        return false;
      } // end of PHI handling

      // We handle calls that:
      //   * Are debug info intrinsics.
      //   * Have a mapping to an IR intrinsic.
      //   * Have a vector version available.
      auto *CI = dyn_cast<CallInst>(&I);

      if (CI && !getVectorIntrinsicIDForCall(CI, TLI) &&
          !isa<DbgInfoIntrinsic>(CI) &&
          !(CI->getCalledFunction() && TLI &&
            (!VFDatabase::getMappings(*CI).empty() ||
             isTLIScalarize(*TLI, *CI)))) {
        // If the call is a recognized math library call, it is likely that
        // we can vectorize it given loosened floating-point constraints.
        LibFunc Func;
        bool IsMathLibCall =
            TLI && CI->getCalledFunction() &&
            CI->getType()->isFloatingPointTy() &&
            TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) &&
            TLI->hasOptimizedCodeGen(Func);

        if (IsMathLibCall) {
          // TODO: Ideally, we should not use clang-specific language here,
          // but it's hard to provide meaningful yet generic advice.
          // Also, should this be guarded by allowExtraAnalysis() and/or be part
          // of the returned info from isFunctionVectorizable()?
          reportVectorizationFailure(
              "Found a non-intrinsic callsite",
              "library call cannot be vectorized. "
              "Try compiling with -fno-math-errno, -ffast-math, "
              "or similar flags",
              "CantVectorizeLibcall", ORE, TheLoop, CI);
        } else {
          reportVectorizationFailure("Found a non-intrinsic callsite",
                                     "call instruction cannot be vectorized",
                                     "CantVectorizeLibcall", ORE, TheLoop, CI);
        }
        return false;
      }

      // Some intrinsics have scalar arguments, which must be the same for all
      // lanes in order to be vectorized (i.e. they must be loop invariant).
      if (CI) {
        auto *SE = PSE.getSE();
        Intrinsic::ID IntrinID = getVectorIntrinsicIDForCall(CI, TLI);
        for (unsigned Idx = 0; Idx < CI->arg_size(); ++Idx)
          if (isVectorIntrinsicWithScalarOpAtArg(IntrinID, Idx)) {
            if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(Idx)),
                                     TheLoop)) {
              reportVectorizationFailure("Found unvectorizable intrinsic",
                  "intrinsic instruction cannot be vectorized",
                  "CantVectorizeIntrinsic", ORE, TheLoop, CI);
              return false;
            }
          }
      }

      // If we found a vectorized variant of a function, note that so LV can
      // make better decisions about maximum VF.
      if (CI && !VFDatabase::getMappings(*CI).empty())
        VecCallVariantsFound = true;

      // Check that the instruction return type is vectorizable.
      // We can't vectorize casts from vector type to scalar type.
      // Also, we can't vectorize extractelement instructions.
      if ((!VectorType::isValidElementType(I.getType()) &&
           !I.getType()->isVoidTy()) ||
          (isa<CastInst>(I) &&
           !VectorType::isValidElementType(I.getOperand(0)->getType())) ||
          isa<ExtractElementInst>(I)) {
        reportVectorizationFailure("Found unvectorizable type",
            "instruction return type cannot be vectorized",
            "CantVectorizeInstructionReturnType", ORE, TheLoop, &I);
        return false;
      }

      // Check that the stored type is vectorizable.
      if (auto *ST = dyn_cast<StoreInst>(&I)) {
        Type *T = ST->getValueOperand()->getType();
        if (!VectorType::isValidElementType(T)) {
          reportVectorizationFailure("Store instruction cannot be vectorized",
                                     "store instruction cannot be vectorized",
                                     "CantVectorizeStore", ORE, TheLoop, ST);
          return false;
        }

        // For nontemporal stores, check that a nontemporal vector version is
        // supported on the target.
        if (ST->getMetadata(LLVMContext::MD_nontemporal)) {
          // Arbitrarily try a vector of 2 elements.
          auto *VecTy = FixedVectorType::get(T, /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of stored type");
          if (!TTI->isLegalNTStore(VecTy, ST->getAlign())) {
            reportVectorizationFailure(
                "nontemporal store instruction cannot be vectorized",
                "nontemporal store instruction cannot be vectorized",
                "CantVectorizeNontemporalStore", ORE, TheLoop, ST);
            return false;
          }
        }

      } else if (auto *LD = dyn_cast<LoadInst>(&I)) {
        if (LD->getMetadata(LLVMContext::MD_nontemporal)) {
          // For nontemporal loads, check that a nontemporal vector version is
          // supported on the target (arbitrarily try a vector of 2 elements).
          auto *VecTy = FixedVectorType::get(I.getType(), /*NumElts=*/2);
          assert(VecTy && "did not find vectorized version of load type");
          if (!TTI->isLegalNTLoad(VecTy, LD->getAlign())) {
            reportVectorizationFailure(
                "nontemporal load instruction cannot be vectorized",
                "nontemporal load instruction cannot be vectorized",
                "CantVectorizeNontemporalLoad", ORE, TheLoop, LD);
            return false;
          }
        }

        // FP instructions can allow unsafe algebra, thus vectorizable by
        // non-IEEE-754 compliant SIMD units.
        // This applies to floating-point math operations and calls, not memory
        // operations, shuffles, or casts, as they don't change precision or
        // semantics.
      } else if (I.getType()->isFloatingPointTy() && (CI || I.isBinaryOp()) &&
                 !I.isFast()) {
        LLVM_DEBUG(dbgs() << "LV: Found FP op with unsafe algebra.\n");
        Hints->setPotentiallyUnsafe();
      }

      // Reduction instructions are allowed to have exit users.
      // All other instructions must not have external users.
      if (hasOutsideLoopUser(TheLoop, &I, AllowedExit)) {
        // We can safely vectorize loops where instructions within the loop are
        // used outside the loop only if the SCEV predicates within the loop
        // are the same as outside the loop. Allowing the exit means reusing
        // the SCEV outside the loop.
        if (PSE.getPredicate().isAlwaysTrue()) {
          AllowedExit.insert(&I);
          continue;
        }
        reportVectorizationFailure("Value cannot be used outside the loop",
                                   "value cannot be used outside the loop",
                                   "ValueUsedOutsideLoop", ORE, TheLoop, &I);
        return false;
      }
    } // next instr.
  }

  if (!PrimaryInduction) {
    if (Inductions.empty()) {
      reportVectorizationFailure("Did not find one integer induction var",
          "loop induction variable could not be identified",
          "NoInductionVariable", ORE, TheLoop);
      return false;
    }
    if (!WidestIndTy) {
      reportVectorizationFailure("Did not find one integer induction var",
          "integer loop induction variable could not be identified",
          "NoIntegerInductionVariable", ORE, TheLoop);
      return false;
    }
    LLVM_DEBUG(dbgs() << "LV: Did not find one integer induction var.\n");
  }

  // Now we know the widest induction type, check if our found induction
  // is the same size. If it's not, unset it here and InnerLoopVectorizer
  // will create another.
  if (PrimaryInduction && WidestIndTy != PrimaryInduction->getType())
    PrimaryInduction = nullptr;

  return true;
}

bool LoopVectorizationLegality::canVectorizeMemory() {
  LAI = &LAIs.getInfo(*TheLoop);
  const OptimizationRemarkAnalysis *LAR = LAI->getReport();
  if (LAR) {
    ORE->emit([&]() {
      return OptimizationRemarkAnalysis(Hints->vectorizeAnalysisPassName(),
                                        "loop not vectorized: ", *LAR);
    });
  }

  if (!LAI->canVectorizeMemory())
    return false;

  if (LAI->hasLoadStoreDependenceInvolvingLoopInvariantAddress()) {
    reportVectorizationFailure("We don't allow storing to uniform addresses",
                               "write to a loop invariant address could not "
                               "be vectorized",
                               "CantVectorizeStoreToLoopInvariantAddress", ORE,
                               TheLoop);
    return false;
  }

  // We can vectorize stores to an invariant address when the final reduction
  // value is guaranteed to be stored at the end of the loop. Also, if the
  // decision to vectorize the loop is made, runtime checks are added to make
  // sure that the invariant address won't alias with any other objects.
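  // For example, in 'for (...) { sum += a[i]; *p = sum; }' only the final
  // value of the reduction must reach *p, so the store can be sunk out of the
  // vectorized loop, guarded by runtime checks that *p does not alias 'a'.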
  if (!LAI->getStoresToInvariantAddresses().empty()) {
    // For each invariant address, check that the last store is unconditional
    // and that the address is not computed inside the loop.
    for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
      if (!isInvariantStoreOfReduction(SI))
        continue;

      if (blockNeedsPredication(SI->getParent())) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write of conditional recurring variant value to a loop "
            "invariant address could not be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }

      // Invariant address should be defined outside of the loop. The LICM
      // pass usually makes sure this happens, but in rare cases it does not;
      // we do not want to overcomplicate vectorization to support this case.
      if (Instruction *Ptr = dyn_cast<Instruction>(SI->getPointerOperand())) {
        if (TheLoop->contains(Ptr)) {
          reportVectorizationFailure(
              "Invariant address is calculated inside the loop",
              "write to a loop invariant address could not "
              "be vectorized",
              "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
          return false;
        }
      }
    }

    if (LAI->hasStoreStoreDependenceInvolvingLoopInvariantAddress()) {
      // For each invariant address, check that its last stored value is the
      // result of one of our reductions.
      //
      // We do not check if dependence with loads exists because that is
      // already checked via hasLoadStoreDependenceInvolvingLoopInvariantAddress.
      ScalarEvolution *SE = PSE.getSE();
      SmallVector<StoreInst *, 4> UnhandledStores;
      for (StoreInst *SI : LAI->getStoresToInvariantAddresses()) {
        if (isInvariantStoreOfReduction(SI)) {
          // Earlier stores to this address are effectively dead code.
          // With opaque pointers it is possible for one pointer to be used with
          // different sizes of stored values:
          //    store i32 0, ptr %x
          //    store i8 0, ptr %x
          // The latest store doesn't completely overwrite the first one in the
          // example. That is why we have to make sure that the types of the
          // stored values are the same.
          // TODO: Check that the bitwidth of an unhandled store is smaller
          // than the one that overwrites it, and add a test.
          erase_if(UnhandledStores, [SE, SI](StoreInst *I) {
            return storeToSameAddress(SE, SI, I) &&
                   I->getValueOperand()->getType() ==
                       SI->getValueOperand()->getType();
          });
          continue;
        }
        UnhandledStores.push_back(SI);
      }

      bool IsOK = UnhandledStores.empty();
      // TODO: we should also validate against InvariantMemSets.
      if (!IsOK) {
        reportVectorizationFailure(
            "We don't allow storing to uniform addresses",
            "write to a loop invariant address could not "
            "be vectorized",
            "CantVectorizeStoreToLoopInvariantAddress", ORE, TheLoop);
        return false;
      }
    }
  }

  PSE.addPredicate(LAI->getPSE().getPredicate());
  return true;
}

bool LoopVectorizationLegality::canVectorizeFPMath(
    bool EnableStrictReductions) {

  // First check if there is any ExactFP math or if we allow reassociations.
  if (!Requirements->getExactFPInst() || Hints->allowReordering())
    return true;

  // If the above is false, we have ExactFPMath & do not allow reordering.
  // If the EnableStrictReductions flag is set, first check if we have any
  // Exact FP induction vars, which we cannot vectorize.
  if (!EnableStrictReductions ||
      any_of(getInductionVars(), [&](auto &Induction) -> bool {
        InductionDescriptor IndDesc = Induction.second;
        return IndDesc.getExactFPMathInst();
      }))
    return false;

  // We can now only vectorize if all reductions with Exact FP math also
  // have the isOrdered flag set, which indicates that we can move the
  // reduction operations in-loop.
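  // For example, a floating-point sum reduction compiled without fast-math
  // may still be vectorizable as an ordered, in-loop reduction, since that
  // preserves the sequential association of the FP additions.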
  return (all_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return !RdxDesc.hasExactFPMath() || RdxDesc.isOrdered();
  }));
}

bool LoopVectorizationLegality::isInvariantStoreOfReduction(StoreInst *SI) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    return RdxDesc.IntermediateStore == SI;
  });
}

bool LoopVectorizationLegality::isInvariantAddressOfReduction(Value *V) {
  return any_of(getReductionVars(), [&](auto &Reduction) -> bool {
    const RecurrenceDescriptor &RdxDesc = Reduction.second;
    if (!RdxDesc.IntermediateStore)
      return false;

    ScalarEvolution *SE = PSE.getSE();
    Value *InvariantAddress = RdxDesc.IntermediateStore->getPointerOperand();
    return V == InvariantAddress ||
           SE->getSCEV(V) == SE->getSCEV(InvariantAddress);
  });
}

bool LoopVectorizationLegality::isInductionPhi(const Value *V) const {
  Value *In0 = const_cast<Value *>(V);
  PHINode *PN = dyn_cast_or_null<PHINode>(In0);
  if (!PN)
    return false;

  return Inductions.count(PN);
}

const InductionDescriptor *
LoopVectorizationLegality::getIntOrFpInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_IntInduction ||
      ID.getKind() == InductionDescriptor::IK_FpInduction)
    return &ID;
  return nullptr;
}

const InductionDescriptor *
LoopVectorizationLegality::getPointerInductionDescriptor(PHINode *Phi) const {
  if (!isInductionPhi(Phi))
    return nullptr;
  auto &ID = getInductionVars().find(Phi)->second;
  if (ID.getKind() == InductionDescriptor::IK_PtrInduction)
    return &ID;
  return nullptr;
}

bool LoopVectorizationLegality::isCastedInductionVariable(
    const Value *V) const {
  auto *Inst = dyn_cast<Instruction>(V);
  return (Inst && InductionCastsToIgnore.count(Inst));
}

bool LoopVectorizationLegality::isInductionVariable(const Value *V) const {
  return isInductionPhi(V) || isCastedInductionVariable(V);
}

bool LoopVectorizationLegality::isFixedOrderRecurrence(
    const PHINode *Phi) const {
  return FixedOrderRecurrences.count(Phi);
}

bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) const {
  return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
}
1252 
1253 bool LoopVectorizationLegality::blockCanBePredicated(
1254     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
1255     SmallPtrSetImpl<const Instruction *> &MaskedOp) const {
1256   for (Instruction &I : *BB) {
1257     // We can predicate blocks with calls to assume, as long as we drop them in
1258     // case we flatten the CFG via predication.
1259     if (match(&I, m_Intrinsic<Intrinsic::assume>())) {
1260       MaskedOp.insert(&I);
1261       continue;
1262     }
1263 
1264     // Do not let llvm.experimental.noalias.scope.decl block the vectorization.
1265     // TODO: there might be cases that it should block the vectorization. Let's
1266     // ignore those for now.
1267     if (isa<NoAliasScopeDeclInst>(&I))
1268       continue;
1269 
1270     // We can allow masked calls if there's at least one vector variant, even
1271     // if we end up scalarizing due to the cost model calculations.
1272     // TODO: Allow other calls if they have appropriate attributes... readonly
1273     // and argmemonly?
1274     if (CallInst *CI = dyn_cast<CallInst>(&I))
1275       if (VFDatabase::hasMaskedVariant(*CI)) {
1276         MaskedOp.insert(CI);
1277         continue;
1278       }
1279 
1280     // Loads are handled via masking (or speculated if safe to do so).
1281     if (auto *LI = dyn_cast<LoadInst>(&I)) {
1282       if (!SafePtrs.count(LI->getPointerOperand()))
1283         MaskedOp.insert(LI);
1284       continue;
1285     }
1286 
1287     // A predicated store requires some form of masking:
1288     // 1) a masked store HW instruction,
1289     // 2) emulation via load-blend-store (only if safe and legal to do so;
1290     //    beware of race conditions), or
1291     // 3) an element-by-element predicate check and scalar store.
1292     if (auto *SI = dyn_cast<StoreInst>(&I)) {
1293       MaskedOp.insert(SI);
1294       continue;
1295     }
1296 
1297     if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
1298       return false;
1299   }
1300 
1301   return true;
1302 }
1303 
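// Illustrative sketch (not part of the original source; names are
// hypothetical): if-conversion replaces control flow such as
//
//   for (int i = 0; i < n; ++i)
//     if (c[i])
//       sum += a[i];
//
// with straight-line code that computes both arms and merges them with a
// select (masking memory accesses where required), leaving a single-block
// loop body that the vectorizer can handle.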
1304 bool LoopVectorizationLegality::canVectorizeWithIfConvert() {
1305   if (!EnableIfConversion) {
1306     reportVectorizationFailure("If-conversion is disabled",
1307                                "if-conversion is disabled",
1308                                "IfConversionDisabled",
1309                                ORE, TheLoop);
1310     return false;
1311   }
1312 
1313   assert(TheLoop->getNumBlocks() > 1 && "Single block loops are vectorizable");
1314 
1315   // A list of pointers which are known to be dereferenceable within the scope
1316   // of the loop body on each iteration of the loop that executes.  That is,
1317   // the memory pointed to can be dereferenced (with the access size implied by
1318   // the value's type) unconditionally within the loop header without
1319   // introducing a new fault.
1320   SmallPtrSet<Value *, 8> SafePointers;
1321 
1322   // Collect safe addresses.
1323   for (BasicBlock *BB : TheLoop->blocks()) {
1324     if (!blockNeedsPredication(BB)) {
1325       for (Instruction &I : *BB)
1326         if (auto *Ptr = getLoadStorePointerOperand(&I))
1327           SafePointers.insert(Ptr);
1328       continue;
1329     }
1330 
1331     // For a block which requires predication, an address may be safe to access
1332     // in the loop without predication if we can prove dereferenceability facts
1333     // sufficient to ensure it'll never fault within the loop. For the moment,
1334     // we restrict this to loads; stores are more complicated due to
1335     // concurrency restrictions.
1336     ScalarEvolution &SE = *PSE.getSE();
1337     SmallVector<const SCEVPredicate *, 4> Predicates;
1338     for (Instruction &I : *BB) {
1339       LoadInst *LI = dyn_cast<LoadInst>(&I);
1340       // Pass the Predicates pointer to isDereferenceableAndAlignedInLoop so
1341       // that it will consider loops that need guarding by SCEV checks. The
1342       // vectorizer will generate these checks if we decide to vectorize.
1343       if (LI && !LI->getType()->isVectorTy() && !mustSuppressSpeculation(*LI) &&
1344           isDereferenceableAndAlignedInLoop(LI, TheLoop, SE, *DT, AC,
1345                                             &Predicates))
1346         SafePointers.insert(LI->getPointerOperand());
1347       Predicates.clear();
1348     }
1349   }
1350 
1351   // Collect the blocks that need predication.
1352   for (BasicBlock *BB : TheLoop->blocks()) {
1353     // We support only branches and switch statements as terminators inside the
1354     // loop.
1355     if (isa<SwitchInst>(BB->getTerminator())) {
1356       if (TheLoop->isLoopExiting(BB)) {
1357         reportVectorizationFailure("Loop contains an unsupported switch",
1358                                    "loop contains an unsupported switch",
1359                                    "LoopContainsUnsupportedSwitch", ORE,
1360                                    TheLoop, BB->getTerminator());
1361         return false;
1362       }
1363     } else if (!isa<BranchInst>(BB->getTerminator())) {
1364       reportVectorizationFailure("Loop contains an unsupported terminator",
1365                                  "loop contains an unsupported terminator",
1366                                  "LoopContainsUnsupportedTerminator", ORE,
1367                                  TheLoop, BB->getTerminator());
1368       return false;
1369     }
1370 
1371     // We must be able to predicate all blocks that need to be predicated.
1372     if (blockNeedsPredication(BB) &&
1373         !blockCanBePredicated(BB, SafePointers, MaskedOp)) {
1374       reportVectorizationFailure(
1375           "Control flow cannot be substituted for a select",
1376           "control flow cannot be substituted for a select", "NoCFGForSelect",
1377           ORE, TheLoop, BB->getTerminator());
1378       return false;
1379     }
1380   }
1381 
1382   // We can if-convert this loop.
1383   return true;
1384 }
1385 
1386 // Helper function to canVectorizeLoopNestCFG.
1387 bool LoopVectorizationLegality::canVectorizeLoopCFG(Loop *Lp,
1388                                                     bool UseVPlanNativePath) {
1389   assert((UseVPlanNativePath || Lp->isInnermost()) &&
1390          "VPlan-native path is not enabled.");
1391 
1392   // TODO: ORE should be improved to show more accurate information when an
1393   // outer loop can't be vectorized because a nested loop is not understood or
1394   // legal. Something like: "outer_loop_location: loop not vectorized:
1395   // (inner_loop_location) loop control flow is not understood by vectorizer".
1396 
1397   // Store the result and return it at the end instead of exiting early, in case
1398   // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
1399   bool Result = true;
1400   bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
1401 
1402   // We must have a loop in canonical form. Loops with indirectbr in them cannot
1403   // be canonicalized.
1404   if (!Lp->getLoopPreheader()) {
1405     reportVectorizationFailure("Loop doesn't have a legal pre-header",
1406         "loop control flow is not understood by vectorizer",
1407         "CFGNotUnderstood", ORE, TheLoop);
1408     if (DoExtraAnalysis)
1409       Result = false;
1410     else
1411       return false;
1412   }
1413 
1414   // We must have a single backedge.
1415   if (Lp->getNumBackEdges() != 1) {
1416     reportVectorizationFailure("The loop must have a single backedge",
1417         "loop control flow is not understood by vectorizer",
1418         "CFGNotUnderstood", ORE, TheLoop);
1419     if (DoExtraAnalysis)
1420       Result = false;
1421     else
1422       return false;
1423   }
1424 
1425   return Result;
1426 }
1427 
1428 bool LoopVectorizationLegality::canVectorizeLoopNestCFG(
1429     Loop *Lp, bool UseVPlanNativePath) {
1430   // Store the result and return it at the end instead of exiting early, in case
1431   // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
1432   bool Result = true;
1433   bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
1434   if (!canVectorizeLoopCFG(Lp, UseVPlanNativePath)) {
1435     if (DoExtraAnalysis)
1436       Result = false;
1437     else
1438       return false;
1439   }
1440 
1441   // Recursively check whether the loop control flow of nested loops is
1442   // understood.
1443   for (Loop *SubLp : *Lp)
1444     if (!canVectorizeLoopNestCFG(SubLp, UseVPlanNativePath)) {
1445       if (DoExtraAnalysis)
1446         Result = false;
1447       else
1448         return false;
1449     }
1450 
1451   return Result;
1452 }
1453 
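// Illustrative sketch (not part of the original source; names are
// hypothetical): the early-exit loops accepted here are read-only search
// loops such as
//
//   for (int i = 0; i < n; ++i)
//     if (a[i] == key)
//       break; // uncountable early exit
//
// where the early-exiting block is the unique predecessor of the latch, the
// latch itself has a countable exit, and every load is known not to fault.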
1454 bool LoopVectorizationLegality::isVectorizableEarlyExitLoop() {
1455   BasicBlock *LatchBB = TheLoop->getLoopLatch();
1456   if (!LatchBB) {
1457     reportVectorizationFailure("Loop does not have a latch",
1458                                "Cannot vectorize early exit loop",
1459                                "NoLatchEarlyExit", ORE, TheLoop);
1460     return false;
1461   }
1462 
1463   if (!Reductions.empty() || !FixedOrderRecurrences.empty()) {
1464     reportVectorizationFailure(
1465         "Found reductions or recurrences in early-exit loop",
1466         "Cannot vectorize early exit loop with reductions or recurrences",
1467         "RecurrencesInEarlyExitLoop", ORE, TheLoop);
1468     return false;
1469   }
1470 
1471   SmallVector<BasicBlock *, 8> ExitingBlocks;
1472   TheLoop->getExitingBlocks(ExitingBlocks);
1473 
1474   // Keep a record of all the exiting blocks, both countable and uncountable.
1475   SmallVector<const SCEVPredicate *, 4> Predicates;
1476   for (BasicBlock *BB : ExitingBlocks) {
1477     const SCEV *EC =
1478         PSE.getSE()->getPredicatedExitCount(TheLoop, BB, &Predicates);
1479     if (isa<SCEVCouldNotCompute>(EC)) {
1480       UncountableExitingBlocks.push_back(BB);
1481 
1482       SmallVector<BasicBlock *, 2> Succs(successors(BB));
1483       if (Succs.size() != 2) {
1484         reportVectorizationFailure(
1485             "Early exiting block does not have exactly two successors",
1486             "Incorrect number of successors from early exiting block",
1487             "EarlyExitTooManySuccessors", ORE, TheLoop);
1488         return false;
1489       }
1490 
1491       BasicBlock *ExitBlock;
1492       if (!TheLoop->contains(Succs[0]))
1493         ExitBlock = Succs[0];
1494       else {
1495         assert(!TheLoop->contains(Succs[1]));
1496         ExitBlock = Succs[1];
1497       }
1498       UncountableExitBlocks.push_back(ExitBlock);
1499     } else
1500       CountableExitingBlocks.push_back(BB);
1501   }
1502   // We can safely ignore the predicates here because when vectorizing the loop
1503   // the PredicatedScalarEvolution class will keep track of all predicates
1504   // for each exiting block anyway. This happens when calling
1505   // PSE.getSymbolicMaxBackedgeTakenCount() below.
1506   Predicates.clear();
1507 
1508   // We only support one uncountable early exit.
1509   if (getUncountableExitingBlocks().size() != 1) {
1510     reportVectorizationFailure(
1511         "Loop has too many uncountable exits",
1512         "Cannot vectorize early exit loop with more than one early exit",
1513         "TooManyUncountableEarlyExits", ORE, TheLoop);
1514     return false;
1515   }
1516 
1517   // The only supported early exit loops so far are ones where the early
1518   // exiting block is the unique predecessor of the latch block.
1519   BasicBlock *LatchPredBB = LatchBB->getUniquePredecessor();
1520   if (LatchPredBB != getUncountableEarlyExitingBlock()) {
1521     reportVectorizationFailure("Early exit is not the latch predecessor",
1522                                "Cannot vectorize early exit loop",
1523                                "EarlyExitNotLatchPredecessor", ORE, TheLoop);
1524     return false;
1525   }
1526 
1527   // The latch block must have a countable exit.
1528   if (isa<SCEVCouldNotCompute>(
1529           PSE.getSE()->getPredicatedExitCount(TheLoop, LatchBB, &Predicates))) {
1530     reportVectorizationFailure(
1531         "Cannot determine exact exit count for latch block",
1532         "Cannot vectorize early exit loop",
1533         "UnknownLatchExitCountEarlyExitLoop", ORE, TheLoop);
1534     return false;
1535   }
1536   assert(llvm::is_contained(CountableExitingBlocks, LatchBB) &&
1537          "Latch block not found in list of countable exits!");
1538 
1539   // Check to see if there are instructions that could potentially generate
1540   // exceptions or have side-effects.
1541   auto IsSafeOperation = [](Instruction *I) -> bool {
1542     switch (I->getOpcode()) {
1543     case Instruction::Load:
1544     case Instruction::Store:
1545     case Instruction::PHI:
1546     case Instruction::Br:
1547       // These are checked separately.
1548       return true;
1549     default:
1550       return isSafeToSpeculativelyExecute(I);
1551     }
1552   };
1553 
1554   for (auto *BB : TheLoop->blocks())
1555     for (auto &I : *BB) {
1556       if (I.mayWriteToMemory()) {
1557         // We don't support writes to memory.
1558         reportVectorizationFailure(
1559             "Writes to memory unsupported in early exit loops",
1560             "Cannot vectorize early exit loop with writes to memory",
1561             "WritesInEarlyExitLoop", ORE, TheLoop);
1562         return false;
1563       } else if (!IsSafeOperation(&I)) {
1564         reportVectorizationFailure("Early exit loop contains operations that "
1565                                    "cannot be speculatively executed",
1566                                    "Early exit loop contains operations that "
1567                                    "cannot be speculatively executed",
1568                                    "UnsafeOperationsEarlyExitLoop", ORE,
1569                                    TheLoop);
1570         return false;
1571       }
1572     }
1573 
1574   // The vectorizer cannot handle loads that occur after the early exit block.
1575   assert(LatchBB->getUniquePredecessor() == getUncountableEarlyExitingBlock() &&
1576          "Expected latch predecessor to be the early exiting block");
1577 
1578   // TODO: Handle loops that may fault.
1579   Predicates.clear();
1580   if (!isDereferenceableReadOnlyLoop(TheLoop, PSE.getSE(), DT, AC,
1581                                      &Predicates)) {
1582     reportVectorizationFailure(
1583         "Loop may fault",
1584         "Cannot vectorize potentially faulting early exit loop",
1585         "PotentiallyFaultingEarlyExitLoop", ORE, TheLoop);
1586     return false;
1587   }
1588 
1589   [[maybe_unused]] const SCEV *SymbolicMaxBTC =
1590       PSE.getSymbolicMaxBackedgeTakenCount();
1591   // Since we have an exact exit count for the latch and the early exit
1592   // dominates the latch, this should guarantee a computable SCEV value.
1593   assert(!isa<SCEVCouldNotCompute>(SymbolicMaxBTC) &&
1594          "Failed to get symbolic expression for backedge taken count");
1595   LLVM_DEBUG(dbgs() << "LV: Found an early exit loop with symbolic max "
1596                        "backedge taken count: "
1597                     << *SymbolicMaxBTC << '\n');
1598   return true;
1599 }
1600 
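// Summary (not part of the original source): the legality checks below run
// roughly in this order: loop-nest CFG checks, if-conversion of multi-block
// bodies, per-instruction checks, early-exit analysis, memory-dependence
// analysis, and finally a budget check on the number of runtime SCEV checks.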
1601 bool LoopVectorizationLegality::canVectorize(bool UseVPlanNativePath) {
1602   // Store the result and return it at the end instead of exiting early, in case
1603   // allowExtraAnalysis is used to report multiple reasons for not vectorizing.
1604   bool Result = true;
1605 
1606   bool DoExtraAnalysis = ORE->allowExtraAnalysis(DEBUG_TYPE);
1607   // Check whether the loop-related control flow in the loop nest is expected
1608   // by the vectorizer.
1609   if (!canVectorizeLoopNestCFG(TheLoop, UseVPlanNativePath)) {
1610     if (DoExtraAnalysis) {
1611       LLVM_DEBUG(dbgs() << "LV: legality check failed: loop nest\n");
1612       Result = false;
1613     } else {
1614       return false;
1615     }
1616   }
1617 
1618   // We need to have a loop header.
1619   LLVM_DEBUG(dbgs() << "LV: Found a loop: " << TheLoop->getHeader()->getName()
1620                     << '\n');
1621 
1622   // Specific checks for outer loops. We skip the remaining legality checks at
1623   // this point because they don't support outer loops.
1624   if (!TheLoop->isInnermost()) {
1625     assert(UseVPlanNativePath && "VPlan-native path is not enabled.");
1626 
1627     if (!canVectorizeOuterLoop()) {
1628       reportVectorizationFailure("Unsupported outer loop",
1629                                  "unsupported outer loop",
1630                                  "UnsupportedOuterLoop",
1631                                  ORE, TheLoop);
1632       // TODO: Implement DoExtraAnalysis when subsequent legal checks support
1633       // outer loops.
1634       return false;
1635     }
1636 
1637     LLVM_DEBUG(dbgs() << "LV: We can vectorize this outer loop!\n");
1638     return Result;
1639   }
1640 
1641   assert(TheLoop->isInnermost() && "Inner loop expected.");
1642   // Check if we can if-convert non-single-bb loops.
1643   unsigned NumBlocks = TheLoop->getNumBlocks();
1644   if (NumBlocks != 1 && !canVectorizeWithIfConvert()) {
1645     LLVM_DEBUG(dbgs() << "LV: Can't if-convert the loop.\n");
1646     if (DoExtraAnalysis)
1647       Result = false;
1648     else
1649       return false;
1650   }
1651 
1652   // Check if we can vectorize the instructions and CFG in this loop.
1653   if (!canVectorizeInstrs()) {
1654     LLVM_DEBUG(dbgs() << "LV: Can't vectorize the instructions or CFG\n");
1655     if (DoExtraAnalysis)
1656       Result = false;
1657     else
1658       return false;
1659   }
1660 
1661   HasUncountableEarlyExit = false;
1662   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
1663     if (!isVectorizableEarlyExitLoop()) {
1664       if (DoExtraAnalysis)
1665         Result = false;
1666       else
1667         return false;
1668     } else
1669       HasUncountableEarlyExit = true;
1670   }
1671 
1672   // Go over each instruction and look at memory deps.
1673   if (!canVectorizeMemory()) {
1674     LLVM_DEBUG(dbgs() << "LV: Can't vectorize due to memory conflicts\n");
1675     if (DoExtraAnalysis)
1676       Result = false;
1677     else
1678       return false;
1679   }
1680 
1681   if (Result) {
1682     LLVM_DEBUG(dbgs() << "LV: We can vectorize this loop"
1683                       << (LAI->getRuntimePointerChecking()->Need
1684                               ? " (with a runtime bound check)"
1685                               : "")
1686                       << "!\n");
1687   }
1688 
1689   unsigned SCEVThreshold = VectorizeSCEVCheckThreshold;
1690   if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
1691     SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;
1692 
1693   if (PSE.getPredicate().getComplexity() > SCEVThreshold) {
1694     LLVM_DEBUG(dbgs() << "LV: Vectorization not profitable "
1695                          "due to SCEVThreshold\n");
1696     reportVectorizationFailure("Too many SCEV checks needed",
1697         "Too many SCEV assumptions need to be made and checked at runtime",
1698         "TooManySCEVRunTimeChecks", ORE, TheLoop);
1699     if (DoExtraAnalysis)
1700       Result = false;
1701     else
1702       return false;
1703   }
1704 
1705   // Okay! We've done all the tests. If any have failed, return false. Otherwise
1706   // we can vectorize, and at this point we don't have any other mem analysis
1707   // which may limit our maximum vectorization factor, so just return true with
1708   // no restrictions.
1709   return Result;
1710 }
1711 
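// Illustrative sketch (not part of the original source; names are
// hypothetical): when the tail is folded by masking, a loop such as
//
//   for (int i = 0; i < n; ++i) // 'n' need not be a multiple of the VF
//     a[i] = b[i] + 1;
//
// is executed entirely as masked vector iterations: lanes with i >= n are
// disabled by the mask rather than handled by a scalar epilogue. This is only
// legal if every block can be predicated and no value other than a reduction
// live-out is used outside the loop, which is what this routine verifies.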
1712 bool LoopVectorizationLegality::canFoldTailByMasking() const {
1714   LLVM_DEBUG(dbgs() << "LV: checking if tail can be folded by masking.\n");
1715 
1716   SmallPtrSet<const Value *, 8> ReductionLiveOuts;
1717 
1718   for (const auto &Reduction : getReductionVars())
1719     ReductionLiveOuts.insert(Reduction.second.getLoopExitInstr());
1720 
1721   // TODO: handle non-reduction outside users when tail is folded by masking.
1722   for (auto *AE : AllowedExit) {
1723     // Check that all users of allowed exit values are inside the loop or
1724     // are the live-out of a reduction.
1725     if (ReductionLiveOuts.count(AE))
1726       continue;
1727     for (User *U : AE->users()) {
1728       Instruction *UI = cast<Instruction>(U);
1729       if (TheLoop->contains(UI))
1730         continue;
1731       LLVM_DEBUG(
1732           dbgs()
1733           << "LV: Cannot fold tail by masking, loop has an outside user for "
1734           << *UI << "\n");
1735       return false;
1736     }
1737   }
1738 
1739   for (const auto &Entry : getInductionVars()) {
1740     PHINode *OrigPhi = Entry.first;
1741     for (User *U : OrigPhi->users()) {
1742       auto *UI = cast<Instruction>(U);
1743       if (!TheLoop->contains(UI)) {
1744         LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking, loop IV has an "
1745                              "outside user for "
1746                           << *UI << "\n");
1747         return false;
1748       }
1749     }
1750   }
1751 
1752   // Deliberately leave the set of safe pointers empty; all accesses get masked.
1753   SmallPtrSet<Value *, 8> SafePointers;
1754 
1755   // Check all blocks for predication, including those that ordinarily do not
1756   // need predication, such as the header block.
1757   SmallPtrSet<const Instruction *, 8> TmpMaskedOp;
1758   for (BasicBlock *BB : TheLoop->blocks()) {
1759     if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp)) {
1760       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking.\n");
1761       return false;
1762     }
1763   }
1764 
1765   LLVM_DEBUG(dbgs() << "LV: can fold tail by masking.\n");
1766 
1767   return true;
1768 }
1769 
1770 void LoopVectorizationLegality::prepareToFoldTailByMasking() {
1771   // Deliberately leave the set of safe pointers empty; all accesses get masked.
1772   SmallPtrSet<Value *, 8> SafePointers;
1773 
1774   // Mark all blocks for predication, including those that ordinarily do not
1775   // need predication, such as the header block.
1776   for (BasicBlock *BB : TheLoop->blocks()) {
1777     [[maybe_unused]] bool R = blockCanBePredicated(BB, SafePointers, MaskedOp);
1778     assert(R && "Must be able to predicate block when tail-folding.");
1779   }
1780 }
1781 
1782 } // namespace llvm
1783