xref: /freebsd-src/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h (revision 4824e7fd18a1223177218d4aec1b3c6c5c4a444e)
1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstddef>
47 #include <map>
48 #include <string>
49 
50 namespace llvm {
51 
52 class BasicBlock;
53 class DominatorTree;
54 class InnerLoopVectorizer;
55 class LoopInfo;
56 class raw_ostream;
57 class RecurrenceDescriptor;
58 class Value;
59 class VPBasicBlock;
60 class VPRegionBlock;
61 class VPlan;
62 class VPReplicateRecipe;
63 class VPlanSlp;
64 
65 /// Returns a calculation for the total number of elements for a given \p VF.
66 /// For fixed width vectors this value is a constant, whereas for scalable
67 /// vectors it is an expression determined at runtime.
68 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
69 
70 /// A range of powers-of-2 vectorization factors with fixed start and
71 /// adjustable end. The range includes start and excludes end, e.g.,:
72 /// [1, 9) = {1, 2, 4, 8}
73 struct VFRange {
74   // A power of 2.
75   const ElementCount Start;
76 
77   // Need not be a power of 2. If End <= Start range is empty.
78   ElementCount End;
79 
80   bool isEmpty() const {
81     return End.getKnownMinValue() <= Start.getKnownMinValue();
82   }
83 
84   VFRange(const ElementCount &Start, const ElementCount &End)
85       : Start(Start), End(End) {
86     assert(Start.isScalable() == End.isScalable() &&
87            "Both Start and End should have the same scalable flag");
88     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
89            "Expected Start to be a power of 2");
90   }
91 };
92 
93 using VPlanPtr = std::unique_ptr<VPlan>;
94 
95 /// In what follows, the term "input IR" refers to code that is fed into the
96 /// vectorizer whereas the term "output IR" refers to code that is generated by
97 /// the vectorizer.
98 
99 /// VPLane provides a way to access lanes in both fixed width and scalable
100 /// vectors, where for the latter the lane index sometimes needs calculating
101 /// as a runtime expression.
102 class VPLane {
103 public:
104   /// Kind describes how to interpret Lane.
105   enum class Kind : uint8_t {
106     /// For First, Lane is the index into the first N elements of a
107     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
108     First,
109     /// For ScalableLast, Lane is the offset from the start of the last
110     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
111     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
112     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
113     ScalableLast
114   };
115 
116 private:
117   /// in [0..VF)
118   unsigned Lane;
119 
120   /// Indicates how the Lane should be interpreted, as described above.
121   Kind LaneKind;
122 
123 public:
124   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
125 
126   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
127 
128   static VPLane getLastLaneForVF(const ElementCount &VF) {
129     unsigned LaneOffset = VF.getKnownMinValue() - 1;
130     Kind LaneKind;
131     if (VF.isScalable())
132       // In this case 'LaneOffset' refers to the offset from the start of the
133       // last subvector with VF.getKnownMinValue() elements.
134       LaneKind = VPLane::Kind::ScalableLast;
135     else
136       LaneKind = VPLane::Kind::First;
137     return VPLane(LaneOffset, LaneKind);
138   }
139 
140   /// Returns a compile-time known value for the lane index and asserts if the
141   /// lane can only be calculated at runtime.
142   unsigned getKnownLane() const {
143     assert(LaneKind == Kind::First);
144     return Lane;
145   }
146 
147   /// Returns an expression describing the lane index that can be used at
148   /// runtime.
149   Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
150 
151   /// Returns the Kind of lane offset.
152   Kind getKind() const { return LaneKind; }
153 
154   /// Returns true if this is the first lane of the whole vector.
155   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
156 
157   /// Maps the lane to a cache index based on \p VF.
158   unsigned mapToCacheIndex(const ElementCount &VF) const {
159     switch (LaneKind) {
160     case VPLane::Kind::ScalableLast:
161       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
162       return VF.getKnownMinValue() + Lane;
163     default:
164       assert(Lane < VF.getKnownMinValue());
165       return Lane;
166     }
167   }
168 
169   /// Returns the maxmimum number of lanes that we are able to consider
170   /// caching for \p VF.
171   static unsigned getNumCachedLanes(const ElementCount &VF) {
172     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
173   }
174 };
175 
176 /// VPIteration represents a single point in the iteration space of the output
177 /// (vectorized and/or unrolled) IR loop.
178 struct VPIteration {
179   /// in [0..UF)
180   unsigned Part;
181 
182   VPLane Lane;
183 
184   VPIteration(unsigned Part, unsigned Lane,
185               VPLane::Kind Kind = VPLane::Kind::First)
186       : Part(Part), Lane(Lane, Kind) {}
187 
188   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
189 
190   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
191 };
192 
193 /// VPTransformState holds information passed down when "executing" a VPlan,
194 /// needed for generating the output IR.
195 struct VPTransformState {
196   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
197                    DominatorTree *DT, IRBuilder<> &Builder,
198                    InnerLoopVectorizer *ILV, VPlan *Plan)
199       : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
200         Plan(Plan) {}
201 
202   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
203   ElementCount VF;
204   unsigned UF;
205 
206   /// Hold the indices to generate specific scalar instructions. Null indicates
207   /// that all instances are to be generated, using either scalar or vector
208   /// instructions.
209   Optional<VPIteration> Instance;
210 
211   struct DataState {
212     /// A type for vectorized values in the new loop. Each value from the
213     /// original loop, when vectorized, is represented by UF vector values in
214     /// the new unrolled loop, where UF is the unroll factor.
215     typedef SmallVector<Value *, 2> PerPartValuesTy;
216 
217     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
218 
219     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
220     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
221   } Data;
222 
223   /// Get the generated Value for a given VPValue and a given Part. Note that
224   /// as some Defs are still created by ILV and managed in its ValueMap, this
225   /// method will delegate the call to ILV in such cases in order to provide
226   /// callers a consistent API.
227   /// \see set.
228   Value *get(VPValue *Def, unsigned Part);
229 
230   /// Get the generated Value for a given VPValue and given Part and Lane.
231   Value *get(VPValue *Def, const VPIteration &Instance);
232 
233   bool hasVectorValue(VPValue *Def, unsigned Part) {
234     auto I = Data.PerPartOutput.find(Def);
235     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
236            I->second[Part];
237   }
238 
239   bool hasAnyVectorValue(VPValue *Def) const {
240     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
241   }
242 
243   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
244     auto I = Data.PerPartScalars.find(Def);
245     if (I == Data.PerPartScalars.end())
246       return false;
247     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
248     return Instance.Part < I->second.size() &&
249            CacheIdx < I->second[Instance.Part].size() &&
250            I->second[Instance.Part][CacheIdx];
251   }
252 
253   /// Set the generated Value for a given VPValue and a given Part.
254   void set(VPValue *Def, Value *V, unsigned Part) {
255     if (!Data.PerPartOutput.count(Def)) {
256       DataState::PerPartValuesTy Entry(UF);
257       Data.PerPartOutput[Def] = Entry;
258     }
259     Data.PerPartOutput[Def][Part] = V;
260   }
261   /// Reset an existing vector value for \p Def and a given \p Part.
262   void reset(VPValue *Def, Value *V, unsigned Part) {
263     auto Iter = Data.PerPartOutput.find(Def);
264     assert(Iter != Data.PerPartOutput.end() &&
265            "need to overwrite existing value");
266     Iter->second[Part] = V;
267   }
268 
269   /// Set the generated scalar \p V for \p Def and the given \p Instance.
270   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
271     auto Iter = Data.PerPartScalars.insert({Def, {}});
272     auto &PerPartVec = Iter.first->second;
273     while (PerPartVec.size() <= Instance.Part)
274       PerPartVec.emplace_back();
275     auto &Scalars = PerPartVec[Instance.Part];
276     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
277     while (Scalars.size() <= CacheIdx)
278       Scalars.push_back(nullptr);
279     assert(!Scalars[CacheIdx] && "should overwrite existing value");
280     Scalars[CacheIdx] = V;
281   }
282 
283   /// Reset an existing scalar value for \p Def and a given \p Instance.
284   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
285     auto Iter = Data.PerPartScalars.find(Def);
286     assert(Iter != Data.PerPartScalars.end() &&
287            "need to overwrite existing value");
288     assert(Instance.Part < Iter->second.size() &&
289            "need to overwrite existing value");
290     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
291     assert(CacheIdx < Iter->second[Instance.Part].size() &&
292            "need to overwrite existing value");
293     Iter->second[Instance.Part][CacheIdx] = V;
294   }
295 
296   /// Hold state information used when constructing the CFG of the output IR,
297   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
298   struct CFGState {
299     /// The previous VPBasicBlock visited. Initially set to null.
300     VPBasicBlock *PrevVPBB = nullptr;
301 
302     /// The previous IR BasicBlock created or used. Initially set to the new
303     /// header BasicBlock.
304     BasicBlock *PrevBB = nullptr;
305 
306     /// The last IR BasicBlock in the output IR. Set to the new latch
307     /// BasicBlock, used for placing the newly created BasicBlocks.
308     BasicBlock *LastBB = nullptr;
309 
310     /// The IR BasicBlock that is the preheader of the vector loop in the output
311     /// IR.
312     /// FIXME: The vector preheader should also be modeled in VPlan, so any code
313     /// that needs to be added to the preheader gets directly generated by
314     /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
315     BasicBlock *VectorPreHeader = nullptr;
316 
317     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
318     /// of replication, maps the BasicBlock of the last replica created.
319     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
320 
321     /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
322     /// up at the end of vector code generation.
323     SmallVector<VPBasicBlock *, 8> VPBBsToFix;
324 
325     CFGState() = default;
326   } CFG;
327 
328   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
329   LoopInfo *LI;
330 
331   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
332   DominatorTree *DT;
333 
334   /// Hold a reference to the IRBuilder used to generate output IR code.
335   IRBuilder<> &Builder;
336 
337   VPValue2ValueTy VPValue2Value;
338 
339   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
340   Value *CanonicalIV = nullptr;
341 
342   /// Hold the trip count of the scalar loop.
343   Value *TripCount = nullptr;
344 
345   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
346   InnerLoopVectorizer *ILV;
347 
348   /// Pointer to the VPlan code is generated for.
349   VPlan *Plan;
350 
351   /// Holds recipes that may generate a poison value that is used after
352   /// vectorization, even when their operands are not poison.
353   SmallPtrSet<VPRecipeBase *, 16> MayGeneratePoisonRecipes;
354 };
355 
356 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
357 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
358 /// reasons, but in the future the only VPUsers should either be recipes or
359 /// live-outs.VPBlockBase uses.
360 struct VPBlockUser : public VPUser {
361   VPBlockUser() : VPUser({}, VPUserID::Block) {}
362 
363   VPValue *getSingleOperandOrNull() {
364     if (getNumOperands() == 1)
365       return getOperand(0);
366 
367     return nullptr;
368   }
369   const VPValue *getSingleOperandOrNull() const {
370     if (getNumOperands() == 1)
371       return getOperand(0);
372 
373     return nullptr;
374   }
375 
376   void resetSingleOpUser(VPValue *NewVal) {
377     assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
378     if (!NewVal) {
379       if (getNumOperands() == 1)
380         removeLastOperand();
381       return;
382     }
383 
384     if (getNumOperands() == 1)
385       setOperand(0, NewVal);
386     else
387       addOperand(NewVal);
388   }
389 };
390 
391 /// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
392 /// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
393 class VPBlockBase {
394   friend class VPBlockUtils;
395 
396   const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).
397 
398   /// An optional name for the block.
399   std::string Name;
400 
401   /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
402   /// it is a topmost VPBlockBase.
403   VPRegionBlock *Parent = nullptr;
404 
405   /// List of predecessor blocks.
406   SmallVector<VPBlockBase *, 1> Predecessors;
407 
408   /// List of successor blocks.
409   SmallVector<VPBlockBase *, 1> Successors;
410 
411   /// Successor selector managed by a VPUser. For blocks with zero or one
412   /// successors, there is no operand. Otherwise there is exactly one operand
413   /// which is the branch condition.
414   VPBlockUser CondBitUser;
415 
416   /// If the block is predicated, its predicate is stored as an operand of this
417   /// VPUser to maintain the def-use relations. Otherwise there is no operand
418   /// here.
419   VPBlockUser PredicateUser;
420 
421   /// VPlan containing the block. Can only be set on the entry block of the
422   /// plan.
423   VPlan *Plan = nullptr;
424 
425   /// Add \p Successor as the last successor to this block.
426   void appendSuccessor(VPBlockBase *Successor) {
427     assert(Successor && "Cannot add nullptr successor!");
428     Successors.push_back(Successor);
429   }
430 
431   /// Add \p Predecessor as the last predecessor to this block.
432   void appendPredecessor(VPBlockBase *Predecessor) {
433     assert(Predecessor && "Cannot add nullptr predecessor!");
434     Predecessors.push_back(Predecessor);
435   }
436 
437   /// Remove \p Predecessor from the predecessors of this block.
438   void removePredecessor(VPBlockBase *Predecessor) {
439     auto Pos = find(Predecessors, Predecessor);
440     assert(Pos && "Predecessor does not exist");
441     Predecessors.erase(Pos);
442   }
443 
444   /// Remove \p Successor from the successors of this block.
445   void removeSuccessor(VPBlockBase *Successor) {
446     auto Pos = find(Successors, Successor);
447     assert(Pos && "Successor does not exist");
448     Successors.erase(Pos);
449   }
450 
451 protected:
452   VPBlockBase(const unsigned char SC, const std::string &N)
453       : SubclassID(SC), Name(N) {}
454 
455 public:
456   /// An enumeration for keeping track of the concrete subclass of VPBlockBase
457   /// that are actually instantiated. Values of this enumeration are kept in the
458   /// SubclassID field of the VPBlockBase objects. They are used for concrete
459   /// type identification.
460   using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };
461 
462   using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;
463 
464   virtual ~VPBlockBase() = default;
465 
466   const std::string &getName() const { return Name; }
467 
468   void setName(const Twine &newName) { Name = newName.str(); }
469 
470   /// \return an ID for the concrete type of this object.
471   /// This is used to implement the classof checks. This should not be used
472   /// for any other purpose, as the values may change as LLVM evolves.
473   unsigned getVPBlockID() const { return SubclassID; }
474 
475   VPRegionBlock *getParent() { return Parent; }
476   const VPRegionBlock *getParent() const { return Parent; }
477 
478   /// \return A pointer to the plan containing the current block.
479   VPlan *getPlan();
480   const VPlan *getPlan() const;
481 
482   /// Sets the pointer of the plan containing the block. The block must be the
483   /// entry block into the VPlan.
484   void setPlan(VPlan *ParentPlan);
485 
486   void setParent(VPRegionBlock *P) { Parent = P; }
487 
488   /// \return the VPBasicBlock that is the entry of this VPBlockBase,
489   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
490   /// VPBlockBase is a VPBasicBlock, it is returned.
491   const VPBasicBlock *getEntryBasicBlock() const;
492   VPBasicBlock *getEntryBasicBlock();
493 
494   /// \return the VPBasicBlock that is the exit of this VPBlockBase,
495   /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
496   /// VPBlockBase is a VPBasicBlock, it is returned.
497   const VPBasicBlock *getExitBasicBlock() const;
498   VPBasicBlock *getExitBasicBlock();
499 
500   const VPBlocksTy &getSuccessors() const { return Successors; }
501   VPBlocksTy &getSuccessors() { return Successors; }
502 
503   const VPBlocksTy &getPredecessors() const { return Predecessors; }
504   VPBlocksTy &getPredecessors() { return Predecessors; }
505 
506   /// \return the successor of this VPBlockBase if it has a single successor.
507   /// Otherwise return a null pointer.
508   VPBlockBase *getSingleSuccessor() const {
509     return (Successors.size() == 1 ? *Successors.begin() : nullptr);
510   }
511 
512   /// \return the predecessor of this VPBlockBase if it has a single
513   /// predecessor. Otherwise return a null pointer.
514   VPBlockBase *getSinglePredecessor() const {
515     return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
516   }
517 
518   size_t getNumSuccessors() const { return Successors.size(); }
519   size_t getNumPredecessors() const { return Predecessors.size(); }
520 
521   /// An Enclosing Block of a block B is any block containing B, including B
522   /// itself. \return the closest enclosing block starting from "this", which
523   /// has successors. \return the root enclosing block if all enclosing blocks
524   /// have no successors.
525   VPBlockBase *getEnclosingBlockWithSuccessors();
526 
527   /// \return the closest enclosing block starting from "this", which has
528   /// predecessors. \return the root enclosing block if all enclosing blocks
529   /// have no predecessors.
530   VPBlockBase *getEnclosingBlockWithPredecessors();
531 
532   /// \return the successors either attached directly to this VPBlockBase or, if
533   /// this VPBlockBase is the exit block of a VPRegionBlock and has no
534   /// successors of its own, search recursively for the first enclosing
535   /// VPRegionBlock that has successors and return them. If no such
536   /// VPRegionBlock exists, return the (empty) successors of the topmost
537   /// VPBlockBase reached.
538   const VPBlocksTy &getHierarchicalSuccessors() {
539     return getEnclosingBlockWithSuccessors()->getSuccessors();
540   }
541 
542   /// \return the hierarchical successor of this VPBlockBase if it has a single
543   /// hierarchical successor. Otherwise return a null pointer.
544   VPBlockBase *getSingleHierarchicalSuccessor() {
545     return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
546   }
547 
548   /// \return the predecessors either attached directly to this VPBlockBase or,
549   /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
550   /// predecessors of its own, search recursively for the first enclosing
551   /// VPRegionBlock that has predecessors and return them. If no such
552   /// VPRegionBlock exists, return the (empty) predecessors of the topmost
553   /// VPBlockBase reached.
554   const VPBlocksTy &getHierarchicalPredecessors() {
555     return getEnclosingBlockWithPredecessors()->getPredecessors();
556   }
557 
558   /// \return the hierarchical predecessor of this VPBlockBase if it has a
559   /// single hierarchical predecessor. Otherwise return a null pointer.
560   VPBlockBase *getSingleHierarchicalPredecessor() {
561     return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
562   }
563 
564   /// \return the condition bit selecting the successor.
565   VPValue *getCondBit();
566   /// \return the condition bit selecting the successor.
567   const VPValue *getCondBit() const;
568   /// Set the condition bit selecting the successor.
569   void setCondBit(VPValue *CV);
570 
571   /// \return the block's predicate.
572   VPValue *getPredicate();
573   /// \return the block's predicate.
574   const VPValue *getPredicate() const;
575   /// Set the block's predicate.
576   void setPredicate(VPValue *Pred);
577 
578   /// Set a given VPBlockBase \p Successor as the single successor of this
579   /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
580   /// This VPBlockBase must have no successors.
581   void setOneSuccessor(VPBlockBase *Successor) {
582     assert(Successors.empty() && "Setting one successor when others exist.");
583     appendSuccessor(Successor);
584   }
585 
586   /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
587   /// successors of this VPBlockBase. \p Condition is set as the successor
588   /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
589   /// IfFalse. This VPBlockBase must have no successors.
590   void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
591                         VPValue *Condition) {
592     assert(Successors.empty() && "Setting two successors when others exist.");
593     assert(Condition && "Setting two successors without condition!");
594     setCondBit(Condition);
595     appendSuccessor(IfTrue);
596     appendSuccessor(IfFalse);
597   }
598 
599   /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
600   /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
601   /// as successor of any VPBasicBlock in \p NewPreds.
602   void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
603     assert(Predecessors.empty() && "Block predecessors already set.");
604     for (auto *Pred : NewPreds)
605       appendPredecessor(Pred);
606   }
607 
608   /// Remove all the predecessor of this block.
609   void clearPredecessors() { Predecessors.clear(); }
610 
611   /// Remove all the successors of this block and set to null its condition bit
612   void clearSuccessors() {
613     Successors.clear();
614     setCondBit(nullptr);
615   }
616 
617   /// The method which generates the output IR that correspond to this
618   /// VPBlockBase, thereby "executing" the VPlan.
619   virtual void execute(struct VPTransformState *State) = 0;
620 
621   /// Delete all blocks reachable from a given VPBlockBase, inclusive.
622   static void deleteCFG(VPBlockBase *Entry);
623 
624   /// Return true if it is legal to hoist instructions into this block.
625   bool isLegalToHoistInto() {
626     // There are currently no constraints that prevent an instruction to be
627     // hoisted into a VPBlockBase.
628     return true;
629   }
630 
631   /// Replace all operands of VPUsers in the block with \p NewValue and also
632   /// replaces all uses of VPValues defined in the block with NewValue.
633   virtual void dropAllReferences(VPValue *NewValue) = 0;
634 
635 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
636   void printAsOperand(raw_ostream &OS, bool PrintType) const {
637     OS << getName();
638   }
639 
640   /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
641   /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
642   /// consequtive numbers.
643   ///
644   /// Note that the numbering is applied to the whole VPlan, so printing
645   /// individual blocks is consistent with the whole VPlan printing.
646   virtual void print(raw_ostream &O, const Twine &Indent,
647                      VPSlotTracker &SlotTracker) const = 0;
648 
649   /// Print plain-text dump of this VPlan to \p O.
650   void print(raw_ostream &O) const {
651     VPSlotTracker SlotTracker(getPlan());
652     print(O, "", SlotTracker);
653   }
654 
655   /// Print the successors of this block to \p O, prefixing all lines with \p
656   /// Indent.
657   void printSuccessors(raw_ostream &O, const Twine &Indent) const;
658 
659   /// Dump this VPBlockBase to dbgs().
660   LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
661 #endif
662 };
663 
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef,
                     public VPUser {
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

public:
  /// Construct a recipe with subclass id \p SC, using \p Operands as its
  /// VPUser operands.
  VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}

  /// Overload taking any iterator range of VPValue operands.
  template <typename IterT>
  VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns the underlying instruction of this recipe's single defined
  /// VPValue.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast: a VPUser
  /// is a recipe exactly when it carries the Recipe user ID.
  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::Recipe;
  }

  /// Returns true if the recipe may have side-effects.
  bool mayHaveSideEffects() const;

  /// Returns true for PHI-like recipes.
  bool isPhi() const {
    return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
  }

  /// Returns true if the recipe may read from memory.
  bool mayReadFromMemory() const;

  /// Returns true if the recipe may write to memory.
  bool mayWriteToMemory() const;

  /// Returns true if the recipe may read from or write to memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }
};
759 
760 inline bool VPUser::classof(const VPDef *Def) {
761   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
762          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
763          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
764          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
765          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
766          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
767          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
768          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
769          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
770          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
771          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
772 }
773 
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
  };

private:
  typedef unsigned char OpcodeTy;
  // Either an LLVM IR Instruction opcode or one of the VPlan extension
  // opcodes declared above.
  OpcodeTy Opcode;
  // Fast-math flags to use when generating the instruction; set via
  // setFastMathFlags().
  FastMathFlags FMF;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  /// Set \p I as the underlying IR instruction of this VPInstruction.
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  /// Create a VPInstruction with opcode \p Opcode and operands \p Operands.
  /// The new VPInstruction has no underlying IR value.
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a copy of this VPInstruction with the same opcode and operands.
  /// Note that the copy carries no underlying IR instruction.
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  /// Return the opcode of the modeled instruction.
  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  /// Return true if the modeled instruction produces a result value.
  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
      return false;
    default:
      return true;
    }
  }

  /// Set the fast-math flags.
  void setFastMathFlags(FastMathFlags FMFNew);
};
876 
/// VPWidenRecipe is a recipe for producing a vector-type copy of its
/// ingredient. This recipe covers most of the traditional vectorization cases
/// where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a widening recipe for \p I with operands \p Operands.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
906 
/// A recipe for widening Call instructions.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  /// Create a widening recipe for the call \p I, using \p CallArguments as
  /// the recipe's operands.
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
932 
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Create a widening recipe for the select \p I with operands \p Operands.
  /// \p InvariantCond indicates whether the select's condition is loop
  /// invariant (determined by the caller).
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
963 
964 /// A recipe for handling GEP instructions.
965 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
966   bool IsPtrLoopInvariant;
967   SmallBitVector IsIndexLoopInvariant;
968 
969 public:
970   template <typename IterT>
971   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
972       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
973         VPValue(VPWidenGEPSC, GEP, this),
974         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
975 
976   template <typename IterT>
977   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
978                    Loop *OrigLoop)
979       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
980         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
981         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
982     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
983     for (auto Index : enumerate(GEP->indices()))
984       IsIndexLoopInvariant[Index.index()] =
985           OrigLoop->isLoopInvariant(Index.value().get());
986   }
987   ~VPWidenGEPRecipe() override = default;
988 
989   /// Method to support type inquiry through isa, cast, and dyn_cast.
990   static inline bool classof(const VPDef *D) {
991     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
992   }
993 
994   /// Generate the gep nodes.
995   void execute(VPTransformState &State) override;
996 
997 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
998   /// Print the recipe.
999   void print(raw_ostream &O, const Twine &Indent,
1000              VPSlotTracker &SlotTracker) const override;
1001 #endif
1002 };
1003 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
  // The original induction phi from the scalar loop.
  PHINode *IV;

public:
  /// Create a recipe for induction \p IV with start value \p Start. The first
  /// defined value models \p Trunc if one is given, and \p IV otherwise. If
  /// \p Cast is given, it is modeled as an additional (second) defined value.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);

    if (Cast)
      new VPValue(Cast, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }

  /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
  VPValue *getCastValue() {
    // The cast, when present, is the second of two defined values.
    if (getNumDefinedValues() != 2)
      return nullptr;
    return getVPValue(1);
  }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
};
1057 
/// A recipe for handling first order recurrences and pointer inductions. For
/// first-order recurrences, the start value is the first operand of the recipe
/// and the incoming value from the backedge is the second operand. It also
/// serves as base class for VPReductionPHIRecipe. In the VPlan native path, all
/// incoming VPValues & VPBasicBlock pairs are managed in the recipe directly.
class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

protected:
  /// Create a recipe for \p Phi using the given VPValue (\p VPVID) and VPDef
  /// (\p VPDefID) sub-class IDs. \p Start, if given, becomes operand 0.
  VPWidenPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                   VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  /// Create a VPWidenPHIRecipe for \p Phi
  VPWidenPHIRecipe(PHINode *Phi)
      : VPWidenPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {}

  /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
  VPWidenPHIRecipe(PHINode *Phi, VPValue &Start) : VPWidenPHIRecipe(Phi) {
    addOperand(&Start);
  }

  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the phi, if it is a reduction or first-order
  /// recurrence.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge, if it is a reduction or
  /// first-order recurrence.
  VPValue *getBackedgeValue() {
    // NOTE(review): unconditionally reads operand 1 — only valid once the
    // backedge value has been added as the second operand.
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
};
1138 
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
  /// Create a recipe for the recurrence phi \p Phi with start value \p Start.
  VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
      : VPWidenPHIRecipe(VPVFirstOrderRecurrencePHISC,
                         VPFirstOrderRecurrencePHISC, Phi, &Start) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPWidenPHIRecipe *D) {
    return D->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
  }

  /// Generate the phi nodes for the recurrence.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1166 
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPWidenPHIRecipe {
  /// Descriptor for the reduction.
  RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPWidenPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }
  static inline bool classof(const VPWidenPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the recurrence descriptor for this reduction.
  RecurrenceDescriptor &getRecurrenceDescriptor() { return RdxDesc; }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1221 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  // The original phi being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1264 
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  // The interleave group this recipe materializes.
  const InterleaveGroup<Instruction> *IG;

  // Whether a mask operand was appended (always the last operand if so).
  bool HasMask = false;

public:
  /// Create a recipe for \p IG accessing \p Addr, with stored values
  /// \p StoredValues and optional \p Mask (nullptr means no mask).
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    // Define a VPValue for every member of the group that produces a value;
    // void-typed members (stores) define nothing.
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last, currently 2nd operand.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumStoreOperands());
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group this recipe was created for.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }

  /// Returns the number of stored operands of this interleave group. Returns 0
  /// for load interleave groups.
  unsigned getNumStoreOperands() const {
    return getNumOperands() - (HasMask ? 2 : 1);
  }
};
1337 
/// A recipe to represent inloop reduction operations, performing a reduction on
/// a vector operand into a scalar value, and adding the result to a chain.
/// The Operands are {ChainOp, VecOp, [Condition]}.
class VPReductionRecipe : public VPRecipeBase, public VPValue {
  /// The recurrence descriptor for the reduction in question.
  RecurrenceDescriptor *RdxDesc;
  /// Pointer to the TTI, needed to create the target reduction
  const TargetTransformInfo *TTI;

public:
  /// Create a reduction recipe for instruction \p I described by \p R, with
  /// chain operand \p ChainOp, vector operand \p VecOp and optional condition
  /// \p CondOp (nullptr when unconditional).
  VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
                    VPValue *VecOp, VPValue *CondOp,
                    const TargetTransformInfo *TTI)
      : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
        VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
    if (CondOp)
      addOperand(CondOp);
  }

  ~VPReductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionSC;
  }

  /// Generate the reduction in the loop
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// The VPValue of the scalar Chain being accumulated.
  VPValue *getChainOp() const { return getOperand(0); }
  /// The VPValue of the vector value to be reduced.
  VPValue *getVecOp() const { return getOperand(1); }
  /// The VPValue of the condition for the block.
  VPValue *getCondOp() const {
    return getNumOperands() > 2 ? getOperand(2) : nullptr;
  }
};
1382 
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  /// Create a replicating recipe for \p I with operands \p Operands.
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Set whether the scalar values should also be packed into a vector.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns true if only a single replica per lane is needed.
  bool isUniform() const { return IsUniform; }

  /// Returns true if the scalar values are also packed into a vector.
  bool isPacked() const { return AlsoPack; }

  /// Returns true if the replicas are predicated.
  bool isPredicated() const { return IsPredicated; }
};
1441 
1442 /// A recipe for generating conditional branches on the bits of a mask.
1443 class VPBranchOnMaskRecipe : public VPRecipeBase {
1444 public:
1445   VPBranchOnMaskRecipe(VPValue *BlockInMask)
1446       : VPRecipeBase(VPBranchOnMaskSC, {}) {
1447     if (BlockInMask) // nullptr means all-one mask.
1448       addOperand(BlockInMask);
1449   }
1450 
1451   /// Method to support type inquiry through isa, cast, and dyn_cast.
1452   static inline bool classof(const VPDef *D) {
1453     return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
1454   }
1455 
1456   /// Generate the extraction of the appropriate bit from the block mask and the
1457   /// conditional branch.
1458   void execute(VPTransformState &State) override;
1459 
1460 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1461   /// Print the recipe.
1462   void print(raw_ostream &O, const Twine &Indent,
1463              VPSlotTracker &SlotTracker) const override {
1464     O << Indent << "BRANCH-ON-MASK ";
1465     if (VPValue *Mask = getMask())
1466       Mask->printAsOperand(O, SlotTracker);
1467     else
1468       O << " All-One";
1469   }
1470 #endif
1471 
1472   /// Return the mask used by this recipe. Note that a full mask is represented
1473   /// by a nullptr.
1474   VPValue *getMask() const {
1475     assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
1476     // Mask is optional.
1477     return getNumOperands() == 1 ? getOperand(0) : nullptr;
1478   }
1479 };
1480 
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, whose value needs phi
  /// nodes after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1509 
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase, public VPValue {
  // The original load or store instruction.
  Instruction &Ingredient;

  // Whether the loaded-from / stored-to addresses are consecutive.
  bool Consecutive;

  // Whether the consecutive loaded/stored addresses are in reverse order.
  bool Reverse;

  /// Append \p Mask as the last operand; a nullptr mask is a no-op.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// Returns true if a mask operand is present. A masked store has operands
  /// {Addr, StoredValue, Mask}; a masked load has {Addr, Mask}.
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  /// Create a recipe widening the load \p Load from \p Addr, optionally
  /// masked by \p Mask.
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Load, this), Ingredient(Load),
        Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Create a recipe widening the store \p Store of \p StoredValue to \p
  /// Addr, optionally masked by \p Mask.
  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        VPValue(VPValue::VPVMemoryInstructionSC, &Store, this),
        Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  // Return whether the loaded-from / stored-to addresses are consecutive.
  bool isConsecutive() const { return Consecutive; }

  // Return whether the consecutive loaded/stored addresses are in reverse
  // order.
  bool isReverse() const { return Reverse; }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1597 
1598 /// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
  /// This recipe takes no operands; it has no underlying IR instruction
  /// (nullptr), the IV is synthesized during execute().
  VPWidenCanonicalIVRecipe()
      : VPRecipeBase(VPWidenCanonicalIVSC, {}),
        VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1623 
1624 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
1625 /// holds a sequence of zero or more VPRecipe's each representing a sequence of
/// output IR instructions. All PHI-like recipes must come before any non-PHI
/// recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  /// Construct a (possibly named) VPBasicBlock, optionally seeded with a
  /// single initial \p Recipe.
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  /// The block owns its recipes; release them all on destruction.
  ~VPBasicBlock() override {
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Instruction iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe before \p InsertPt. \p Recipe must not already belong
  /// to a block; this block becomes its parent.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
1732 
1733 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
1734 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
1735 /// A VPRegionBlock may indicate that its contents are to be replicated several
1736 /// times. This is designed to support predicated scalarization, in which a
1737 /// scalar if-then code structure needs to be generated VF * UF times. Having
1738 /// this replication indicator helps to keep a single model for multiple
1739 /// candidate VF's. The actual replication takes place only once the desired VF
1740 /// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Construct a region spanning \p Entry to \p Exit. \p Entry must have no
  /// predecessors and \p Exit no successors; both become children of this
  /// region.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Construct an empty region; entry and exit must be set later via
  /// setEntry() and setExit().
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Drop references into the region's blocks before deleting them.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
1831 
1832 //===----------------------------------------------------------------------===//
1833 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
1834 //===----------------------------------------------------------------------===//
1835 
1836 // The following set of template specializations implement GraphTraits to treat
1837 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1839 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1840 // successors/predecessors but not to the blocks inside the region.
1841 
1842 template <> struct GraphTraits<VPBlockBase *> {
1843   using NodeRef = VPBlockBase *;
1844   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
1845 
1846   static NodeRef getEntryNode(NodeRef N) { return N; }
1847 
1848   static inline ChildIteratorType child_begin(NodeRef N) {
1849     return N->getSuccessors().begin();
1850   }
1851 
1852   static inline ChildIteratorType child_end(NodeRef N) {
1853     return N->getSuccessors().end();
1854   }
1855 };
1856 
1857 template <> struct GraphTraits<const VPBlockBase *> {
1858   using NodeRef = const VPBlockBase *;
1859   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;
1860 
1861   static NodeRef getEntryNode(NodeRef N) { return N; }
1862 
1863   static inline ChildIteratorType child_begin(NodeRef N) {
1864     return N->getSuccessors().begin();
1865   }
1866 
1867   static inline ChildIteratorType child_end(NodeRef N) {
1868     return N->getSuccessors().end();
1869   }
1870 };
1871 
1872 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
1873 // of successors for the inverse traversal.
1874 template <> struct GraphTraits<Inverse<VPBlockBase *>> {
1875   using NodeRef = VPBlockBase *;
1876   using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;
1877 
1878   static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }
1879 
1880   static inline ChildIteratorType child_begin(NodeRef N) {
1881     return N->getPredecessors().begin();
1882   }
1883 
1884   static inline ChildIteratorType child_end(NodeRef N) {
1885     return N->getPredecessors().end();
1886   }
1887 };
1888 
1889 // The following set of template specializations implement GraphTraits to
1890 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1891 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1892 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
1893 // there won't be automatic recursion into other VPBlockBases that turn to be
1894 // VPRegionBlocks.
1895 
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // The graph rooted at a region is entered through the region's entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1913 
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // The graph rooted at a region is entered through the region's entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1932 
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  // For the inverse traversal, the region is entered through its exit block.
  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1953 
1954 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1955 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1956 /// parent region's successors. This ensures all blocks in a region are visited
1957 /// before any blocks in a successor region when doing a reverse post-order
1958 // traversal of the graph.
1959 template <typename BlockPtrTy>
1960 class VPAllSuccessorsIterator
1961     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1962                                   std::forward_iterator_tag, VPBlockBase> {
1963   BlockPtrTy Block;
1964   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1965   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1966   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1967   /// for the successor array.
1968   size_t SuccessorIdx;
1969 
1970   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1971     while (Current && Current->getNumSuccessors() == 0)
1972       Current = Current->getParent();
1973     return Current;
1974   }
1975 
1976   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1977   /// both the const and non-const operator* implementations.
1978   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1979     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
1980       if (SuccIdx == 0)
1981         return R->getEntry();
1982       SuccIdx--;
1983     }
1984 
1985     // For exit blocks, use the next parent region with successors.
1986     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1987   }
1988 
1989 public:
1990   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1991       : Block(Block), SuccessorIdx(Idx) {}
1992   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1993       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1994 
1995   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1996     Block = R.Block;
1997     SuccessorIdx = R.SuccessorIdx;
1998     return *this;
1999   }
2000 
2001   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
2002     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2003     unsigned NumSuccessors = ParentWithSuccs
2004                                  ? ParentWithSuccs->getNumSuccessors()
2005                                  : Block->getNumSuccessors();
2006 
2007     if (auto *R = dyn_cast<VPRegionBlock>(Block))
2008       return {R, NumSuccessors + 1};
2009     return {Block, NumSuccessors};
2010   }
2011 
2012   bool operator==(const VPAllSuccessorsIterator &R) const {
2013     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2014   }
2015 
2016   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2017 
2018   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2019 
2020   VPAllSuccessorsIterator &operator++() {
2021     SuccessorIdx++;
2022     return *this;
2023   }
2024 
2025   VPAllSuccessorsIterator operator++(int X) {
2026     VPAllSuccessorsIterator Orig = *this;
2027     SuccessorIdx++;
2028     return Orig;
2029   }
2030 };
2031 
/// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
/// Being a distinct type, it lets the recursive GraphTraits specializations
/// below be selected without clashing with the flat VPBlockBase traits.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  // The block at which the recursive traversal starts.
  BlockTy Entry;

public:
  VPBlockRecursiveTraversalWrapper(BlockTy Entry) : Entry(Entry) {}
  BlockTy getEntry() { return Entry; }
};
2040 
2041 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2042 /// including traversing through VPRegionBlocks.  Exit blocks of a region
2043 /// implicitly have their parent region's successors. This ensures all blocks in
2044 /// a region are visited before any blocks in a successor region when doing a
2045 /// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  // VPAllSuccessorsIterator also visits a region's entry block and the
  // successors exit blocks inherit from their parent region, which is what
  // makes this traversal recursive.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2064 
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  // VPAllSuccessorsIterator also visits a region's entry block and the
  // successors exit blocks inherit from their parent region, which is what
  // makes this traversal recursive.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2083 
2084 /// VPlan models a candidate for vectorization, encoding various decisions take
2085 /// to produce efficient output IR, including which branches, basic-blocks and
2086 /// output IR instructions to generate, and their cost. VPlan holds a
2087 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2088 /// VPBlock.
2089 class VPlan {
2090   friend class VPlanPrinter;
2091   friend class VPSlotTracker;
2092 
2093   /// Hold the single entry to the Hierarchical CFG of the VPlan.
2094   VPBlockBase *Entry;
2095 
2096   /// Holds the VFs applicable to this VPlan.
2097   SmallSetVector<ElementCount, 2> VFs;
2098 
2099   /// Holds the name of the VPlan, for printing.
2100   std::string Name;
2101 
2102   /// Holds all the external definitions created for this VPlan.
2103   // TODO: Introduce a specific representation for external definitions in
2104   // VPlan. External definitions must be immutable and hold a pointer to its
2105   // underlying IR that will be used to implement its structural comparison
2106   // (operators '==' and '<').
2107   SetVector<VPValue *> VPExternalDefs;
2108 
2109   /// Represents the backedge taken count of the original loop, for folding
2110   /// the tail.
2111   VPValue *BackedgeTakenCount = nullptr;
2112 
2113   /// Holds a mapping between Values and their corresponding VPValue inside
2114   /// VPlan.
2115   Value2VPValueTy Value2VPValue;
2116 
2117   /// Contains all VPValues that been allocated by addVPValue directly and need
2118   /// to be free when the plan's destructor is called.
2119   SmallVector<VPValue *, 16> VPValuesToFree;
2120 
2121   /// Holds the VPLoopInfo analysis for this VPlan.
2122   VPLoopInfo VPLInfo;
2123 
2124   /// Indicates whether it is safe use the Value2VPValue mapping or if the
2125   /// mapping cannot be used any longer, because it is stale.
2126   bool Value2VPValueEnabled = true;
2127 
2128 public:
2129   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2130     if (Entry)
2131       Entry->setPlan(this);
2132   }
2133 
2134   ~VPlan() {
2135     if (Entry) {
2136       VPValue DummyValue;
2137       for (VPBlockBase *Block : depth_first(Entry))
2138         Block->dropAllReferences(&DummyValue);
2139 
2140       VPBlockBase::deleteCFG(Entry);
2141     }
2142     for (VPValue *VPV : VPValuesToFree)
2143       delete VPV;
2144     if (BackedgeTakenCount)
2145       delete BackedgeTakenCount;
2146     for (VPValue *Def : VPExternalDefs)
2147       delete Def;
2148   }
2149 
2150   /// Generate the IR code for this VPlan.
2151   void execute(struct VPTransformState *State);
2152 
2153   VPBlockBase *getEntry() { return Entry; }
2154   const VPBlockBase *getEntry() const { return Entry; }
2155 
2156   VPBlockBase *setEntry(VPBlockBase *Block) {
2157     Entry = Block;
2158     Block->setPlan(this);
2159     return Entry;
2160   }
2161 
2162   /// The backedge taken count of the original loop.
2163   VPValue *getOrCreateBackedgeTakenCount() {
2164     if (!BackedgeTakenCount)
2165       BackedgeTakenCount = new VPValue();
2166     return BackedgeTakenCount;
2167   }
2168 
2169   /// Mark the plan to indicate that using Value2VPValue is not safe any
2170   /// longer, because it may be stale.
2171   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2172 
2173   void addVF(ElementCount VF) { VFs.insert(VF); }
2174 
2175   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2176 
2177   const std::string &getName() const { return Name; }
2178 
2179   void setName(const Twine &newName) { Name = newName.str(); }
2180 
2181   /// Add \p VPVal to the pool of external definitions if it's not already
2182   /// in the pool.
2183   void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2184 
2185   void addVPValue(Value *V) {
2186     assert(Value2VPValueEnabled &&
2187            "IR value to VPValue mapping may be out of date!");
2188     assert(V && "Trying to add a null Value to VPlan");
2189     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2190     VPValue *VPV = new VPValue(V);
2191     Value2VPValue[V] = VPV;
2192     VPValuesToFree.push_back(VPV);
2193   }
2194 
2195   void addVPValue(Value *V, VPValue *VPV) {
2196     assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2197     assert(V && "Trying to add a null Value to VPlan");
2198     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2199     Value2VPValue[V] = VPV;
2200   }
2201 
2202   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2203   /// checking whether it is safe to query VPValues using IR Values.
2204   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2205     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2206            "Value2VPValue mapping may be out of date!");
2207     assert(V && "Trying to get the VPValue of a null Value");
2208     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2209     return Value2VPValue[V];
2210   }
2211 
2212   /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2213   /// OverrideAllowed can be used to disable checking whether it is safe to
2214   /// query VPValues using IR Values.
2215   VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2216     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2217            "Value2VPValue mapping may be out of date!");
2218     assert(V && "Trying to get or add the VPValue of a null Value");
2219     if (!Value2VPValue.count(V))
2220       addVPValue(V);
2221     return getVPValue(V);
2222   }
2223 
2224   void removeVPValueFor(Value *V) {
2225     assert(Value2VPValueEnabled &&
2226            "IR value to VPValue mapping may be out of date!");
2227     Value2VPValue.erase(V);
2228   }
2229 
2230   /// Return the VPLoopInfo analysis for this VPlan.
2231   VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2232   const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2233 
2234 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2235   /// Print this VPlan to \p O.
2236   void print(raw_ostream &O) const;
2237 
2238   /// Print this VPlan in DOT format to \p O.
2239   void printDOT(raw_ostream &O) const;
2240 
2241   /// Dump the plan to stderr (for debugging).
2242   LLVM_DUMP_METHOD void dump() const;
2243 #endif
2244 
2245   /// Returns a range mapping the values the range \p Operands to their
2246   /// corresponding VPValues.
2247   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2248   mapToVPValues(User::op_range Operands) {
2249     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2250       return getOrAddVPValue(Op);
2251     };
2252     return map_range(Operands, Fn);
2253   }
2254 
2255 private:
2256   /// Add to the given dominator tree the header block and every new basic block
2257   /// that was created between it and the latch block, inclusive.
2258   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2259                                   BasicBlock *LoopPreHeaderBB,
2260                                   BasicBlock *LoopExitBB);
2261 };
2262 
2263 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2264 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2265 /// indented and follows the dot format.
2266 class VPlanPrinter {
2267   raw_ostream &OS;
2268   const VPlan &Plan;
2269   unsigned Depth = 0;
2270   unsigned TabWidth = 2;
2271   std::string Indent;
2272   unsigned BID = 0;
2273   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2274 
2275   VPSlotTracker SlotTracker;
2276 
2277   /// Handle indentation.
2278   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2279 
2280   /// Print a given \p Block of the Plan.
2281   void dumpBlock(const VPBlockBase *Block);
2282 
2283   /// Print the information related to the CFG edges going out of a given
2284   /// \p Block, followed by printing the successor blocks themselves.
2285   void dumpEdges(const VPBlockBase *Block);
2286 
2287   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2288   /// its successor blocks.
2289   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2290 
2291   /// Print a given \p Region of the Plan.
2292   void dumpRegion(const VPRegionBlock *Region);
2293 
2294   unsigned getOrCreateBID(const VPBlockBase *Block) {
2295     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2296   }
2297 
2298   Twine getOrCreateName(const VPBlockBase *Block);
2299 
2300   Twine getUID(const VPBlockBase *Block);
2301 
2302   /// Print the information related to a CFG edge between two VPBlockBases.
2303   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2304                 const Twine &Label);
2305 
2306 public:
2307   VPlanPrinter(raw_ostream &O, const VPlan &P)
2308       : OS(O), Plan(P), SlotTracker(&P) {}
2309 
2310   LLVM_DUMP_METHOD void dump();
2311 };
2312 
/// Lightweight wrapper around an IR Value so it can be printed through the
/// VPlan printing facilities (see operator<< below).
struct VPlanIngredient {
  /// The wrapped IR value.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  void print(raw_ostream &O) const;
};
2320 
/// Stream a VPlanIngredient into \p OS by delegating to its print() method.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2325 
/// Stream a whole VPlan into \p OS by delegating to VPlan::print().
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2330 #endif
2331 
2332 //===----------------------------------------------------------------------===//
2333 // VPlan Utilities
2334 //===----------------------------------------------------------------------===//
2335 
2336 /// Class that provides utilities for VPBlockBases in VPlan.
2337 class VPBlockUtils {
2338 public:
2339   VPBlockUtils() = delete;
2340 
2341   /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2342   /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2343   /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
2344   /// has more than one successor, its conditional bit is propagated to \p
2345   /// NewBlock. \p NewBlock must have neither successors nor predecessors.
2346   static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2347     assert(NewBlock->getSuccessors().empty() &&
2348            "Can't insert new block with successors.");
2349     // TODO: move successors from BlockPtr to NewBlock when this functionality
2350     // is necessary. For now, setBlockSingleSuccessor will assert if BlockPtr
2351     // already has successors.
2352     BlockPtr->setOneSuccessor(NewBlock);
2353     NewBlock->setPredecessors({BlockPtr});
2354     NewBlock->setParent(BlockPtr->getParent());
2355   }
2356 
2357   /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2358   /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2359   /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2360   /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
2361   /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
2362   /// must have neither successors nor predecessors.
2363   static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2364                                    VPValue *Condition, VPBlockBase *BlockPtr) {
2365     assert(IfTrue->getSuccessors().empty() &&
2366            "Can't insert IfTrue with successors.");
2367     assert(IfFalse->getSuccessors().empty() &&
2368            "Can't insert IfFalse with successors.");
2369     BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
2370     IfTrue->setPredecessors({BlockPtr});
2371     IfFalse->setPredecessors({BlockPtr});
2372     IfTrue->setParent(BlockPtr->getParent());
2373     IfFalse->setParent(BlockPtr->getParent());
2374   }
2375 
2376   /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2377   /// the successors of \p From and \p From to the predecessors of \p To. Both
2378   /// VPBlockBases must have the same parent, which can be null. Both
2379   /// VPBlockBases can be already connected to other VPBlockBases.
2380   static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2381     assert((From->getParent() == To->getParent()) &&
2382            "Can't connect two block with different parents");
2383     assert(From->getNumSuccessors() < 2 &&
2384            "Blocks can't have more than two successors.");
2385     From->appendSuccessor(To);
2386     To->appendPredecessor(From);
2387   }
2388 
2389   /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2390   /// from the successors of \p From and \p From from the predecessors of \p To.
2391   static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2392     assert(To && "Successor to disconnect is null.");
2393     From->removeSuccessor(To);
2394     To->removePredecessor(From);
2395   }
2396 
2397   /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
2398   static bool isBackEdge(const VPBlockBase *FromBlock,
2399                          const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
2400     assert(FromBlock->getParent() == ToBlock->getParent() &&
2401            FromBlock->getParent() && "Must be in same region");
2402     const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
2403     const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
2404     if (!FromLoop || !ToLoop || FromLoop != ToLoop)
2405       return false;
2406 
2407     // A back-edge is a branch from the loop latch to its header.
2408     return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
2409   }
2410 
2411   /// Returns true if \p Block is a loop latch
2412   static bool blockIsLoopLatch(const VPBlockBase *Block,
2413                                const VPLoopInfo *VPLInfo) {
2414     if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
2415       return ParentVPL->isLoopLatch(Block);
2416 
2417     return false;
2418   }
2419 
2420   /// Count and return the number of succesors of \p PredBlock excluding any
2421   /// backedges.
2422   static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
2423                                       VPLoopInfo *VPLI) {
2424     unsigned Count = 0;
2425     for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
2426       if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
2427         Count++;
2428     }
2429     return Count;
2430   }
2431 
2432   /// Return an iterator range over \p Range which only includes \p BlockTy
2433   /// blocks. The accesses are casted to \p BlockTy.
2434   template <typename BlockTy, typename T>
2435   static auto blocksOnly(const T &Range) {
2436     // Create BaseTy with correct const-ness based on BlockTy.
2437     using BaseTy =
2438         typename std::conditional<std::is_const<BlockTy>::value,
2439                                   const VPBlockBase, VPBlockBase>::type;
2440 
2441     // We need to first create an iterator range over (const) BlocktTy & instead
2442     // of (const) BlockTy * for filter_range to work properly.
2443     auto Mapped =
2444         map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2445     auto Filter = make_filter_range(
2446         Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2447     return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2448       return cast<BlockTy>(&Block);
2449     });
2450   }
2451 };
2452 
/// Holds VPlan-level interleave groups, built from an IR-level
/// InterleavedAccessInfo and keyed by VPInstruction.
class VPInterleavedAccessInfo {
  /// Maps each interleaved VPInstruction to the interleave group it belongs
  /// to; several instructions may map to the same group. The groups are owned
  /// by this class and released in the destructor.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  ~VPInterleavedAccessInfo() {
    // Several map entries can share one group, so collect the distinct group
    // pointers into a set first.
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2491 
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  /// Classification of an operand bundle while building the SLP tree: bundle
  /// construction failed, all operands are loads, or all operands share a
  /// common opcode.
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    // Sentinel bundle marking an empty DenseMap slot; never a real bundle.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    // Sentinel bundle marking a deleted (tombstoned) DenseMap slot.
    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    // Hash every pointer in the bundle; the hash is order-sensitive, matching
    // isEqual's element-wise comparison below.
    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  // Interleaved-access information for the plan being transformed; not owned.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  /// A combined VPInstruction paired with the operand bundle it was built
  /// from.
  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instructions can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2586 } // end namespace llvm
2587 
2588 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2589