//===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the LoopVectorizationLegality class. The original code,
/// which lived in the Loop Vectorizer, has been moved out to its own file for
/// modularity and reusability.
///
/// Currently, it works for innermost loop vectorization. Extending this to
/// outer loop vectorization is a TODO item.
///
/// Also provides:
/// 1) LoopVectorizeHints class, which keeps a number of loop annotations
/// locally for easy look-up. It has the ability to write them back as
/// loop metadata, upon request.
/// 2) LoopVectorizationRequirements class for lazy bail-out, for the purpose
/// of reporting a useful failure-to-vectorize message.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

namespace llvm {
class AssumptionCache;
class BasicBlock;
class BlockFrequencyInfo;
class DemandedBits;
class DominatorTree;
class Function;
class Loop;
class LoopInfo;
class Metadata;
class OptimizationRemarkEmitter;
class PredicatedScalarEvolution;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetTransformInfo;
class Type;

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
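/// For example, "#pragma clang loop vectorize_width(4)" arrives here as loop
/// metadata along these lines (a sketch; the metadata node numbers are
/// arbitrary):
/// \code
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.vectorize.width", i32 4}
/// \endcode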
class LoopVectorizeHints {
  enum HintKind {
    HK_WIDTH,
    HK_INTERLEAVE,
    HK_FORCE,
    HK_ISVECTORIZED,
    HK_PREDICATE,
    HK_SCALABLE
  };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val);
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced.
  Hint Force;

  /// Already vectorized.
  Hint IsVectorized;

  /// Vector predicate.
  Hint Predicate;

  /// Says whether we should use fixed width or scalable vectorization.
  Hint Scalable;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  enum ScalableForceKind {
    /// Not selected.
    SK_Unspecified = -1,
    /// Disables vectorization with scalable vectors.
    SK_FixedWidthOnly = 0,
    /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
    /// scalable vectors when the cost-model is inconclusive. This is the
    /// default when the scalable.enable hint is enabled through a pragma.
    SK_PreferScalable = 1
  };

  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
                     OptimizationRemarkEmitter &ORE,
                     const TargetTransformInfo *TTI = nullptr);

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized();

  bool allowVectorization(Function *F, Loop *L,
                          bool VectorizeOnlyWhenForced) const;

  /// Dumps all the hint information.
  void emitRemarkWithHints() const;

  ElementCount getWidth() const {
    return ElementCount::get(Width.Value, (ScalableForceKind)Scalable.Value ==
                                              SK_PreferScalable);
  }

  unsigned getInterleave() const {
    if (Interleave.Value)
      return Interleave.Value;
    // If interleaving is not explicitly set, assume that if we do not want
    // unrolling, we also don't want any interleaving.
    if (llvm::hasUnrollTransformation(TheLoop) & TM_Disable)
      return 1;
    return 0;
  }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  unsigned getPredicate() const { return Predicate.Value; }
  enum ForceKind getForce() const {
    if ((ForceKind)Force.Value == FK_Undefined &&
        hasDisableAllTransformsHint(TheLoop))
      return FK_Disabled;
    return (ForceKind)Force.Value;
  }

  /// \return true if scalable vectorization has been explicitly disabled.
  bool isScalableVectorizationDisabled() const {
    return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
  }

  /// If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const;

  /// When loop hints that enable vectorization are provided, we allow the
  /// vectorizer to change the order of operations given by the scalar loop.
  /// This is not enabled by default because it can be unsafe or inefficient.
  /// For example, reordering floating-point operations will change the way
  /// round-off error accumulates in the loop.
  bool allowReordering() const;

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata();

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg);

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};

/// This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by the legality analysis and the
/// cost model. Once vectorization has been determined to be possible and
/// profitable, the requirements can be verified by looking for metadata or
/// compiler options.
/// For example, some loops require FP commutativity, which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example by specifying #pragma clang loop vectorize or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
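/// A typical case is a strict floating-point reduction (a sketch in C):
/// \code
///   // Vectorizing this loop reassociates the additions, changing how
///   // round-off error accumulates; without fast-math (or an in-order
///   // reduction) the requirement fails and a diagnostic is emitted.
///   double Sum = 0.0;
///   for (int i = 0; i < N; ++i)
///     Sum += A[i];
/// \endcode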
class LoopVectorizationRequirements {
public:
  /// Track the first floating-point instruction that cannot be reassociated.
  void addExactFPMathInst(Instruction *I) {
    if (I && !ExactFPMathInst)
      ExactFPMathInst = I;
  }

  Instruction *getExactFPInst() { return ExactFPMathInst; }

private:
  Instruction *ExactFPMathInst = nullptr;
};

/// This holds details about a histogram operation -- a load -> update -> store
/// sequence where each lane in a vector might be updating the same element as
/// another lane.
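/// The canonical scalar form is a bucket-increment loop (a sketch in C):
/// \code
///   for (int i = 0; i < N; ++i)
///     Buckets[Indices[i]]++; // Two vector lanes may hit the same bucket.
/// \endcode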
struct HistogramInfo {
  LoadInst *Load;
  Instruction *Update;
  StoreInst *Store;

  HistogramInfo(LoadInst *Load, Instruction *Update, StoreInst *Store)
      : Load(Load), Update(Update), Store(Store) {}
};

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program (see the sketch below for an example).
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying the
/// induction variables and the different reduction variables.
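/// As an illustration of the memory checks, a loop such as the following
/// (a sketch in C) carries a backward dependence between iterations and
/// cannot be vectorized:
/// \code
///   for (int i = 0; i < N - 1; ++i)
///     A[i + 1] = A[i] * 2; // Iteration i+1 reads the value written at i.
/// \endcode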
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetTransformInfo *TTI, TargetLibraryInfo *TLI, Function *F,
      LoopAccessInfoManager &LAIs, LoopInfo *LI, OptimizationRemarkEmitter *ORE,
      LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB,
      AssumptionCache *AC, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT), LAIs(LAIs),
        ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC), BFI(BFI),
        PSI(PSI) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  using InductionList = MapVector<PHINode *, InductionDescriptor>;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, use the good old LV code path.
  bool canVectorize(bool UseVPlanNativePath);

  /// Returns true if it is legal to vectorize the FP math operations in this
  /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
  /// we can use in-order reductions.
  bool canVectorizeFPMath(bool EnableStrictReductions);

  /// Return true if we can vectorize this loop while folding its tail by
  /// masking.
  bool canFoldTailByMasking() const;

  /// Mark all respective loads/stores for masking. Must only be called when
  /// tail-folding is possible.
  void prepareToFoldTailByMasking();

  /// Returns the primary induction variable.
  PHINode *getPrimaryInduction() { return PrimaryInduction; }

  /// Returns the reduction variables found in the loop.
  const ReductionList &getReductionVars() const { return Reductions; }

  /// Returns the induction variables found in the loop.
  const InductionList &getInductionVars() const { return Inductions; }

  /// Return the fixed-order recurrences found in the loop.
  RecurrenceSet &getFixedOrderRecurrences() { return FixedOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns true if the given store is a final invariant store of one of the
  /// reductions found in the loop.
  bool isInvariantStoreOfReduction(StoreInst *SI);

  /// Returns true if the given address is invariant and is used to store a
  /// recurrent expression.
  bool isInvariantAddressOfReduction(Value *V);

  /// Returns true if V is a Phi node of an induction variable in this loop.
  bool isInductionPhi(const Value *V) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
  /// floating point induction.
  const InductionDescriptor *getIntOrFpInductionDescriptor(PHINode *Phi) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is a pointer
  /// induction.
  const InductionDescriptor *getPointerInductionDescriptor(PHINode *Phi) const;

  /// Returns true if V is a cast that is part of an induction def-use chain,
  /// and has been proven to be redundant under a runtime guard (in other
  /// words, the cast has the same SCEV expression as the induction phi).
  bool isCastedInductionVariable(const Value *V) const;

  /// Returns true if V can be considered as an induction variable in this
  /// loop. V can be the induction phi, or some redundant cast in the def-use
  /// chain of the induction phi.
  bool isInductionVariable(const Value *V) const;

  /// Returns true if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }

  /// Returns true if Phi is a fixed-order recurrence in this loop.
  bool isFixedOrderRecurrence(const PHINode *Phi) const;

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB) const;

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  /// NOTE: This method must only be used before modifying the original scalar
  /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
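  /// A sketch of the intended classification:
  /// \code
  ///   A[i]     // consecutive, increasing: returns 1
  ///   A[N - i] // consecutive, decreasing: returns -1
  ///   A[i * 2] // strided, not consecutive: returns 0
  /// \endcode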
  int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

  /// Returns true if \p V is invariant across all loop iterations according to
  /// SCEV.
  bool isInvariant(Value *V) const;

  /// Returns true if value V is uniform across \p VF lanes, when \p VF is
  /// provided, and otherwise if \p V is invariant across all loop iterations.
  bool isUniform(Value *V, ElementCount VF) const;

  /// A uniform memory op is a load or store which accesses the same memory
  /// location on all \p VF lanes, if \p VF is provided and otherwise if the
  /// memory location is invariant.
  bool isUniformMemOp(Instruction &I, ElementCount VF) const;

  /// Returns the information that we collected about runtime memory checks.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  bool isSafeForAnyVectorWidth() const {
    return LAI->getDepChecker().isSafeForAnyVectorWidth();
  }

  uint64_t getMaxSafeVectorWidthInBits() const {
    return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
  }

  /// Returns true if the loop has exactly one uncountable early exit, i.e. an
  /// uncountable exit that isn't the latch block.
  bool hasUncountableEarlyExit() const {
    return getUncountableEdge().has_value();
  }

  /// Returns the uncountable early exiting block, if there is exactly one.
  BasicBlock *getUncountableEarlyExitingBlock() const {
    return hasUncountableEarlyExit() ? getUncountableEdge()->first : nullptr;
  }

  /// Returns the destination of the uncountable early exiting block, if there
  /// is exactly one.
  BasicBlock *getUncountableEarlyExitBlock() const {
    return hasUncountableEarlyExit() ? getUncountableEdge()->second : nullptr;
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) const {
    return MaskedOp.contains(I);
  }

  /// Returns true if there is at least one function call in the loop which
  /// has a vectorized variant available.
  bool hasVectorCallVariants() const { return VecCallVariantsFound; }

  /// Returns true if there is at least one function call in the loop which
  /// returns a struct type and needs to be vectorized.
  bool hasStructVectorCall() const { return StructVecCallFound; }

  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }

  /// Returns a HistogramInfo* for the given instruction if it was determined
  /// to be part of a load -> update -> store sequence where multiple lanes
  /// may be working on the same memory address.
  std::optional<const HistogramInfo *> getHistogramInfo(Instruction *I) const {
    for (const HistogramInfo &HGram : Histograms)
      if (HGram.Load == I || HGram.Update == I || HGram.Store == I)
        return &HGram;

    return std::nullopt;
  }

  /// Returns true if the loop contains any known histogram operations.
  bool hasHistograms() const { return !Histograms.empty(); }

  PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
    return &PSE;
  }

  Loop *getLoop() const { return TheLoop; }

  LoopInfo *getLoopInfo() const { return LI; }

  AssumptionCache *getAssumptionCache() const { return AC; }

  ScalarEvolution *getScalarEvolution() const { return PSE.getSE(); }

  DominatorTree *getDominatorTree() const { return DT; }

  /// Returns all exiting blocks with a countable exit, i.e. the
  /// exit-not-taken count is known exactly at compile time.
  const SmallVector<BasicBlock *, 4> &getCountableExitingBlocks() const {
    return CountableExitingBlocks;
  }

  /// Returns the loop edge to an uncountable exit, or std::nullopt if there
  /// isn't a single such edge.
  std::optional<std::pair<BasicBlock *, BasicBlock *>>
  getUncountableEdge() const {
    return UncountableEdge;
  }

private:
  /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
  /// its nested loops are considered legal for vectorization. These legal
  /// checks are common for inner and outer loop vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, use the good old LV code path.
  bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Set up outer loop inductions by checking Phis in the outer loop header
  /// for supported inductions (int inductions). Return false if any of these
  /// Phis is not a supported induction or if we fail to find an induction.
  bool setupOuterLoopInductions();

  /// Return true if the pre-header, exiting and latch blocks of \p Lp
  /// (non-recursive) are considered legal for vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, use the good old LV code path.
  bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// If LAA cannot determine whether all dependences are safe, we may be able
  /// to further analyze some IndirectUnsafe dependences and, if they match a
  /// certain pattern (like a histogram), then we may still be able to
  /// vectorize.
  bool canVectorizeIndirectUnsafeDependences();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if we can vectorize this outer loop. The method performs
  /// specific checks for outer loop vectorization.
  bool canVectorizeOuterLoop();

  /// Returns true if this is an early exit loop that can be vectorized.
  /// Currently, a loop with an uncountable early exit is considered
  /// vectorizable if:
  ///   1. There are no writes to memory in the loop.
  ///   2. The loop has only one early uncountable exit.
  ///   3. The early exit block dominates the latch block.
  ///   4. The latch block has an exact exit count.
  ///   5. The loop does not contain reductions or recurrences.
  ///   6. We can prove at compile-time that the loop will not contain faulting
  ///   loads.
  ///   7. It is safe to speculatively execute instructions such as divide or
  ///   call instructions.
  /// The list above is not based on theoretical limitations of vectorization,
  /// but simply a statement that more work is needed to support these
  /// additional cases safely; the sketch below shows the kind of loop covered.
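  /// A minimal example of such a loop (a sketch in C; the exit at the
  /// comparison is uncountable, while the latch exit at i == N is countable):
  /// \code
  ///   for (int i = 0; i < N; ++i) // countable latch exit
  ///     if (A[i] == Key)          // uncountable early exit
  ///       return i;
  /// \endcode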
  bool isVectorizableEarlyExitLoop();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed, and record the loads/stores that require masking.
  /// \p SafePtrs is a list of addresses that are known to be legal and we know
  /// that we can read from them without a segfault.
  /// \p MaskedOp is a list of instructions that have to be transformed into
  /// calls to the appropriate masked intrinsic when the loop is vectorized,
  /// or dropped if the instruction is a conditional assume intrinsic.
  bool
  blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
                       SmallPtrSetImpl<const Instruction *> &MaskedOp) const;

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;

  /// Target Transform Info.
  TargetTransformInfo *TTI;

  /// Target Library Info.
  TargetLibraryInfo *TLI;

  /// Dominator Tree.
  DominatorTree *DT;

  /// LoopAccess analysis.
  LoopAccessInfoManager &LAIs;

  const LoopAccessInfo *LAI = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  //  ---  vectorization state --- //

  /// Holds the primary induction variable. This is the counter of the
  /// loop.
  PHINode *PrimaryInduction = nullptr;

  /// Holds the reduction variables.
  ReductionList Reductions;

  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Holds all the casts that participate in the update chain of the induction
  /// variables, and that have been proven to be redundant (possibly under a
  /// runtime guard). These casts can be ignored when creating the vectorized
  /// loop body.
  SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;

  /// Holds the phi nodes that are fixed-order recurrences.
  RecurrenceSet FixedOrderRecurrences;

  /// Holds the widest induction type encountered.
  Type *WidestIndTy = nullptr;

  /// Allowed outside users. This holds the variables that can be accessed from
  /// outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// The demanded bits analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  DemandedBits *DB;

  /// The assumption cache analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  AssumptionCache *AC;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic or drop them in case of
  /// conditional assumes.
  SmallPtrSet<const Instruction *, 8> MaskedOp;

  /// Contains all identified histogram operations, which are sequences of
  /// load -> update -> store instructions where multiple lanes in a vector
  /// may work on the same memory location.
  SmallVector<HistogramInfo, 1> Histograms;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  /// If we discover function calls within the loop which have a valid
  /// vectorized variant, record that fact so that LoopVectorize can
  /// (potentially) make a better decision on the maximum VF and enable
  /// the use of those function variants.
  bool VecCallVariantsFound = false;

  /// If we find a call (to be vectorized) that returns a struct type, record
  /// that so we can bail out until this is supported.
  /// TODO: Remove this flag once vectorizing calls with struct returns is
  /// supported.
  bool StructVecCallFound = false;

  /// Keep track of all the countable and uncountable exiting blocks if
  /// the exact backedge-taken count is not computable.
  SmallVector<BasicBlock *, 4> CountableExitingBlocks;

  /// Keep track of the loop edge to an uncountable exit, comprising a pair
  /// of (Exiting, Exit) blocks, if there is exactly one early exit.
  std::optional<std::pair<BasicBlock *, BasicBlock *>> UncountableEdge;
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H