xref: /freebsd-src/contrib/llvm-project/llvm/lib/Transforms/Vectorize/VPlan.h (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //===- VPlan.h - Represent A Vectorizer Plan --------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file contains the declarations of the Vectorization Plan base classes:
11 /// 1. VPBasicBlock and VPRegionBlock that inherit from a common pure virtual
12 ///    VPBlockBase, together implementing a Hierarchical CFG;
13 /// 2. Specializations of GraphTraits that allow VPBlockBase graphs to be
14 ///    treated as proper graphs for generic algorithms;
15 /// 3. Pure virtual VPRecipeBase serving as the base class for recipes contained
16 ///    within VPBasicBlocks;
17 /// 4. VPInstruction, a concrete Recipe and VPUser modeling a single planned
18 ///    instruction;
19 /// 5. The VPlan class holding a candidate for vectorization;
20 /// 6. The VPlanPrinter class providing a way to print a plan in dot format;
21 /// These are documented in docs/VectorizationPlan.rst.
22 //
23 //===----------------------------------------------------------------------===//
24 
25 #ifndef LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
26 #define LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
27 
28 #include "VPlanLoopInfo.h"
29 #include "VPlanValue.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/DepthFirstIterator.h"
32 #include "llvm/ADT/GraphTraits.h"
33 #include "llvm/ADT/Optional.h"
34 #include "llvm/ADT/SmallBitVector.h"
35 #include "llvm/ADT/SmallPtrSet.h"
36 #include "llvm/ADT/SmallSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Twine.h"
39 #include "llvm/ADT/ilist.h"
40 #include "llvm/ADT/ilist_node.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/Support/InstructionCost.h"
44 #include <algorithm>
45 #include <cassert>
46 #include <cstddef>
47 #include <map>
48 #include <string>
49 
50 namespace llvm {
51 
52 class BasicBlock;
53 class DominatorTree;
54 class InnerLoopVectorizer;
55 class LoopInfo;
56 class raw_ostream;
57 class RecurrenceDescriptor;
58 class Value;
59 class VPBasicBlock;
60 class VPRegionBlock;
61 class VPlan;
62 class VPlanSlp;
63 
64 /// Returns a calculation for the total number of elements for a given \p VF.
65 /// For fixed width vectors this value is a constant, whereas for scalable
66 /// vectors it is an expression determined at runtime.
67 Value *getRuntimeVF(IRBuilder<> &B, Type *Ty, ElementCount VF);
68 
69 /// A range of powers-of-2 vectorization factors with fixed start and
70 /// adjustable end. The range includes start and excludes end, e.g.,:
71 /// [1, 9) = {1, 2, 4, 8}
72 struct VFRange {
73   // A power of 2.
74   const ElementCount Start;
75 
76   // Need not be a power of 2. If End <= Start range is empty.
77   ElementCount End;
78 
79   bool isEmpty() const {
80     return End.getKnownMinValue() <= Start.getKnownMinValue();
81   }
82 
83   VFRange(const ElementCount &Start, const ElementCount &End)
84       : Start(Start), End(End) {
85     assert(Start.isScalable() == End.isScalable() &&
86            "Both Start and End should have the same scalable flag");
87     assert(isPowerOf2_32(Start.getKnownMinValue()) &&
88            "Expected Start to be a power of 2");
89   }
90 };
91 
92 using VPlanPtr = std::unique_ptr<VPlan>;
93 
94 /// In what follows, the term "input IR" refers to code that is fed into the
95 /// vectorizer whereas the term "output IR" refers to code that is generated by
96 /// the vectorizer.
97 
98 /// VPLane provides a way to access lanes in both fixed width and scalable
99 /// vectors, where for the latter the lane index sometimes needs calculating
100 /// as a runtime expression.
101 class VPLane {
102 public:
103   /// Kind describes how to interpret Lane.
104   enum class Kind : uint8_t {
105     /// For First, Lane is the index into the first N elements of a
106     /// fixed-vector <N x <ElTy>> or a scalable vector <vscale x N x <ElTy>>.
107     First,
108     /// For ScalableLast, Lane is the offset from the start of the last
109     /// N-element subvector in a scalable vector <vscale x N x <ElTy>>. For
110     /// example, a Lane of 0 corresponds to lane `(vscale - 1) * N`, a Lane of
111     /// 1 corresponds to `((vscale - 1) * N) + 1`, etc.
112     ScalableLast
113   };
114 
115 private:
116   /// in [0..VF)
117   unsigned Lane;
118 
119   /// Indicates how the Lane should be interpreted, as described above.
120   Kind LaneKind;
121 
122 public:
123   VPLane(unsigned Lane, Kind LaneKind) : Lane(Lane), LaneKind(LaneKind) {}
124 
125   static VPLane getFirstLane() { return VPLane(0, VPLane::Kind::First); }
126 
127   static VPLane getLastLaneForVF(const ElementCount &VF) {
128     unsigned LaneOffset = VF.getKnownMinValue() - 1;
129     Kind LaneKind;
130     if (VF.isScalable())
131       // In this case 'LaneOffset' refers to the offset from the start of the
132       // last subvector with VF.getKnownMinValue() elements.
133       LaneKind = VPLane::Kind::ScalableLast;
134     else
135       LaneKind = VPLane::Kind::First;
136     return VPLane(LaneOffset, LaneKind);
137   }
138 
139   /// Returns a compile-time known value for the lane index and asserts if the
140   /// lane can only be calculated at runtime.
141   unsigned getKnownLane() const {
142     assert(LaneKind == Kind::First);
143     return Lane;
144   }
145 
146   /// Returns an expression describing the lane index that can be used at
147   /// runtime.
148   Value *getAsRuntimeExpr(IRBuilder<> &Builder, const ElementCount &VF) const;
149 
150   /// Returns the Kind of lane offset.
151   Kind getKind() const { return LaneKind; }
152 
153   /// Returns true if this is the first lane of the whole vector.
154   bool isFirstLane() const { return Lane == 0 && LaneKind == Kind::First; }
155 
156   /// Maps the lane to a cache index based on \p VF.
157   unsigned mapToCacheIndex(const ElementCount &VF) const {
158     switch (LaneKind) {
159     case VPLane::Kind::ScalableLast:
160       assert(VF.isScalable() && Lane < VF.getKnownMinValue());
161       return VF.getKnownMinValue() + Lane;
162     default:
163       assert(Lane < VF.getKnownMinValue());
164       return Lane;
165     }
166   }
167 
168   /// Returns the maxmimum number of lanes that we are able to consider
169   /// caching for \p VF.
170   static unsigned getNumCachedLanes(const ElementCount &VF) {
171     return VF.getKnownMinValue() * (VF.isScalable() ? 2 : 1);
172   }
173 };
174 
175 /// VPIteration represents a single point in the iteration space of the output
176 /// (vectorized and/or unrolled) IR loop.
177 struct VPIteration {
178   /// in [0..UF)
179   unsigned Part;
180 
181   VPLane Lane;
182 
183   VPIteration(unsigned Part, unsigned Lane,
184               VPLane::Kind Kind = VPLane::Kind::First)
185       : Part(Part), Lane(Lane, Kind) {}
186 
187   VPIteration(unsigned Part, const VPLane &Lane) : Part(Part), Lane(Lane) {}
188 
189   bool isFirstIteration() const { return Part == 0 && Lane.isFirstLane(); }
190 };
191 
192 /// VPTransformState holds information passed down when "executing" a VPlan,
193 /// needed for generating the output IR.
194 struct VPTransformState {
195   VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
196                    DominatorTree *DT, IRBuilder<> &Builder,
197                    InnerLoopVectorizer *ILV, VPlan *Plan)
198       : VF(VF), UF(UF), Instance(), LI(LI), DT(DT), Builder(Builder), ILV(ILV),
199         Plan(Plan) {}
200 
201   /// The chosen Vectorization and Unroll Factors of the loop being vectorized.
202   ElementCount VF;
203   unsigned UF;
204 
205   /// Hold the indices to generate specific scalar instructions. Null indicates
206   /// that all instances are to be generated, using either scalar or vector
207   /// instructions.
208   Optional<VPIteration> Instance;
209 
210   struct DataState {
211     /// A type for vectorized values in the new loop. Each value from the
212     /// original loop, when vectorized, is represented by UF vector values in
213     /// the new unrolled loop, where UF is the unroll factor.
214     typedef SmallVector<Value *, 2> PerPartValuesTy;
215 
216     DenseMap<VPValue *, PerPartValuesTy> PerPartOutput;
217 
218     using ScalarsPerPartValuesTy = SmallVector<SmallVector<Value *, 4>, 2>;
219     DenseMap<VPValue *, ScalarsPerPartValuesTy> PerPartScalars;
220   } Data;
221 
222   /// Get the generated Value for a given VPValue and a given Part. Note that
223   /// as some Defs are still created by ILV and managed in its ValueMap, this
224   /// method will delegate the call to ILV in such cases in order to provide
225   /// callers a consistent API.
226   /// \see set.
227   Value *get(VPValue *Def, unsigned Part);
228 
229   /// Get the generated Value for a given VPValue and given Part and Lane.
230   Value *get(VPValue *Def, const VPIteration &Instance);
231 
232   bool hasVectorValue(VPValue *Def, unsigned Part) {
233     auto I = Data.PerPartOutput.find(Def);
234     return I != Data.PerPartOutput.end() && Part < I->second.size() &&
235            I->second[Part];
236   }
237 
238   bool hasAnyVectorValue(VPValue *Def) const {
239     return Data.PerPartOutput.find(Def) != Data.PerPartOutput.end();
240   }
241 
242   bool hasScalarValue(VPValue *Def, VPIteration Instance) {
243     auto I = Data.PerPartScalars.find(Def);
244     if (I == Data.PerPartScalars.end())
245       return false;
246     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
247     return Instance.Part < I->second.size() &&
248            CacheIdx < I->second[Instance.Part].size() &&
249            I->second[Instance.Part][CacheIdx];
250   }
251 
252   /// Set the generated Value for a given VPValue and a given Part.
253   void set(VPValue *Def, Value *V, unsigned Part) {
254     if (!Data.PerPartOutput.count(Def)) {
255       DataState::PerPartValuesTy Entry(UF);
256       Data.PerPartOutput[Def] = Entry;
257     }
258     Data.PerPartOutput[Def][Part] = V;
259   }
260   /// Reset an existing vector value for \p Def and a given \p Part.
261   void reset(VPValue *Def, Value *V, unsigned Part) {
262     auto Iter = Data.PerPartOutput.find(Def);
263     assert(Iter != Data.PerPartOutput.end() &&
264            "need to overwrite existing value");
265     Iter->second[Part] = V;
266   }
267 
268   /// Set the generated scalar \p V for \p Def and the given \p Instance.
269   void set(VPValue *Def, Value *V, const VPIteration &Instance) {
270     auto Iter = Data.PerPartScalars.insert({Def, {}});
271     auto &PerPartVec = Iter.first->second;
272     while (PerPartVec.size() <= Instance.Part)
273       PerPartVec.emplace_back();
274     auto &Scalars = PerPartVec[Instance.Part];
275     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
276     while (Scalars.size() <= CacheIdx)
277       Scalars.push_back(nullptr);
278     assert(!Scalars[CacheIdx] && "should overwrite existing value");
279     Scalars[CacheIdx] = V;
280   }
281 
282   /// Reset an existing scalar value for \p Def and a given \p Instance.
283   void reset(VPValue *Def, Value *V, const VPIteration &Instance) {
284     auto Iter = Data.PerPartScalars.find(Def);
285     assert(Iter != Data.PerPartScalars.end() &&
286            "need to overwrite existing value");
287     assert(Instance.Part < Iter->second.size() &&
288            "need to overwrite existing value");
289     unsigned CacheIdx = Instance.Lane.mapToCacheIndex(VF);
290     assert(CacheIdx < Iter->second[Instance.Part].size() &&
291            "need to overwrite existing value");
292     Iter->second[Instance.Part][CacheIdx] = V;
293   }
294 
295   /// Hold state information used when constructing the CFG of the output IR,
296   /// traversing the VPBasicBlocks and generating corresponding IR BasicBlocks.
297   struct CFGState {
298     /// The previous VPBasicBlock visited. Initially set to null.
299     VPBasicBlock *PrevVPBB = nullptr;
300 
301     /// The previous IR BasicBlock created or used. Initially set to the new
302     /// header BasicBlock.
303     BasicBlock *PrevBB = nullptr;
304 
305     /// The last IR BasicBlock in the output IR. Set to the new latch
306     /// BasicBlock, used for placing the newly created BasicBlocks.
307     BasicBlock *LastBB = nullptr;
308 
309     /// The IR BasicBlock that is the preheader of the vector loop in the output
310     /// IR.
311     /// FIXME: The vector preheader should also be modeled in VPlan, so any code
312     /// that needs to be added to the preheader gets directly generated by
313     /// VPlan. There should be no need to manage a pointer to the IR BasicBlock.
314     BasicBlock *VectorPreHeader = nullptr;
315 
316     /// A mapping of each VPBasicBlock to the corresponding BasicBlock. In case
317     /// of replication, maps the BasicBlock of the last replica created.
318     SmallDenseMap<VPBasicBlock *, BasicBlock *> VPBB2IRBB;
319 
320     /// Vector of VPBasicBlocks whose terminator instruction needs to be fixed
321     /// up at the end of vector code generation.
322     SmallVector<VPBasicBlock *, 8> VPBBsToFix;
323 
324     CFGState() = default;
325   } CFG;
326 
327   /// Hold a pointer to LoopInfo to register new basic blocks in the loop.
328   LoopInfo *LI;
329 
330   /// Hold a pointer to Dominator Tree to register new basic blocks in the loop.
331   DominatorTree *DT;
332 
333   /// Hold a reference to the IRBuilder used to generate output IR code.
334   IRBuilder<> &Builder;
335 
336   VPValue2ValueTy VPValue2Value;
337 
338   /// Hold the canonical scalar IV of the vector loop (start=0, step=VF*UF).
339   Value *CanonicalIV = nullptr;
340 
341   /// Hold the trip count of the scalar loop.
342   Value *TripCount = nullptr;
343 
344   /// Hold a pointer to InnerLoopVectorizer to reuse its IR generation methods.
345   InnerLoopVectorizer *ILV;
346 
347   /// Pointer to the VPlan code is generated for.
348   VPlan *Plan;
349 };
350 
351 /// VPUsers instance used by VPBlockBase to manage CondBit and the block
352 /// predicate. Currently VPBlockUsers are used in VPBlockBase for historical
353 /// reasons, but in the future the only VPUsers should either be recipes or
354 /// live-outs.VPBlockBase uses.
355 struct VPBlockUser : public VPUser {
356   VPBlockUser() : VPUser({}, VPUserID::Block) {}
357 
358   VPValue *getSingleOperandOrNull() {
359     if (getNumOperands() == 1)
360       return getOperand(0);
361 
362     return nullptr;
363   }
364   const VPValue *getSingleOperandOrNull() const {
365     if (getNumOperands() == 1)
366       return getOperand(0);
367 
368     return nullptr;
369   }
370 
371   void resetSingleOpUser(VPValue *NewVal) {
372     assert(getNumOperands() <= 1 && "Didn't expect more than one operand!");
373     if (!NewVal) {
374       if (getNumOperands() == 1)
375         removeLastOperand();
376       return;
377     }
378 
379     if (getNumOperands() == 1)
380       setOperand(0, NewVal);
381     else
382       addOperand(NewVal);
383   }
384 };
385 
/// VPBlockBase is the building block of the Hierarchical Control-Flow Graph.
/// A VPBlockBase can be either a VPBasicBlock or a VPRegionBlock.
class VPBlockBase {
  friend class VPBlockUtils;

  const unsigned char SubclassID; ///< Subclass identifier (for isa/dyn_cast).

  /// An optional name for the block.
  std::string Name;

  /// The immediate VPRegionBlock which this VPBlockBase belongs to, or null if
  /// it is a topmost VPBlockBase.
  VPRegionBlock *Parent = nullptr;

  /// List of predecessor blocks.
  SmallVector<VPBlockBase *, 1> Predecessors;

  /// List of successor blocks.
  SmallVector<VPBlockBase *, 1> Successors;

  /// Successor selector managed by a VPUser. For blocks with zero or one
  /// successors, there is no operand. Otherwise there is exactly one operand
  /// which is the branch condition.
  VPBlockUser CondBitUser;

  /// If the block is predicated, its predicate is stored as an operand of this
  /// VPUser to maintain the def-use relations. Otherwise there is no operand
  /// here.
  VPBlockUser PredicateUser;

  /// VPlan containing the block. Can only be set on the entry block of the
  /// plan.
  VPlan *Plan = nullptr;

  /// Add \p Successor as the last successor to this block.
  void appendSuccessor(VPBlockBase *Successor) {
    assert(Successor && "Cannot add nullptr successor!");
    Successors.push_back(Successor);
  }

  /// Add \p Predecessor as the last predecessor to this block.
  void appendPredecessor(VPBlockBase *Predecessor) {
    assert(Predecessor && "Cannot add nullptr predecessor!");
    Predecessors.push_back(Predecessor);
  }

  /// Remove \p Predecessor from the predecessors of this block.
  void removePredecessor(VPBlockBase *Predecessor) {
    auto Pos = find(Predecessors, Predecessor);
    assert(Pos && "Predecessor does not exist");
    Predecessors.erase(Pos);
  }

  /// Remove \p Successor from the successors of this block.
  void removeSuccessor(VPBlockBase *Successor) {
    auto Pos = find(Successors, Successor);
    assert(Pos && "Successor does not exist");
    Successors.erase(Pos);
  }

protected:
  /// Protected constructor: only concrete subclasses (VPBasicBlock,
  /// VPRegionBlock) may be instantiated.
  VPBlockBase(const unsigned char SC, const std::string &N)
      : SubclassID(SC), Name(N) {}

public:
  /// An enumeration for keeping track of the concrete subclass of VPBlockBase
  /// that are actually instantiated. Values of this enumeration are kept in the
  /// SubclassID field of the VPBlockBase objects. They are used for concrete
  /// type identification.
  using VPBlockTy = enum { VPBasicBlockSC, VPRegionBlockSC };

  using VPBlocksTy = SmallVectorImpl<VPBlockBase *>;

  virtual ~VPBlockBase() = default;

  const std::string &getName() const { return Name; }

  void setName(const Twine &newName) { Name = newName.str(); }

  /// \return an ID for the concrete type of this object.
  /// This is used to implement the classof checks. This should not be used
  /// for any other purpose, as the values may change as LLVM evolves.
  unsigned getVPBlockID() const { return SubclassID; }

  VPRegionBlock *getParent() { return Parent; }
  const VPRegionBlock *getParent() const { return Parent; }

  /// \return A pointer to the plan containing the current block.
  VPlan *getPlan();
  const VPlan *getPlan() const;

  /// Sets the pointer of the plan containing the block. The block must be the
  /// entry block into the VPlan.
  void setPlan(VPlan *ParentPlan);

  void setParent(VPRegionBlock *P) { Parent = P; }

  /// \return the VPBasicBlock that is the entry of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getEntryBasicBlock() const;
  VPBasicBlock *getEntryBasicBlock();

  /// \return the VPBasicBlock that is the exit of this VPBlockBase,
  /// recursively, if the latter is a VPRegionBlock. Otherwise, if this
  /// VPBlockBase is a VPBasicBlock, it is returned.
  const VPBasicBlock *getExitBasicBlock() const;
  VPBasicBlock *getExitBasicBlock();

  const VPBlocksTy &getSuccessors() const { return Successors; }
  VPBlocksTy &getSuccessors() { return Successors; }

  const VPBlocksTy &getPredecessors() const { return Predecessors; }
  VPBlocksTy &getPredecessors() { return Predecessors; }

  /// \return the successor of this VPBlockBase if it has a single successor.
  /// Otherwise return a null pointer.
  VPBlockBase *getSingleSuccessor() const {
    return (Successors.size() == 1 ? *Successors.begin() : nullptr);
  }

  /// \return the predecessor of this VPBlockBase if it has a single
  /// predecessor. Otherwise return a null pointer.
  VPBlockBase *getSinglePredecessor() const {
    return (Predecessors.size() == 1 ? *Predecessors.begin() : nullptr);
  }

  size_t getNumSuccessors() const { return Successors.size(); }
  size_t getNumPredecessors() const { return Predecessors.size(); }

  /// An Enclosing Block of a block B is any block containing B, including B
  /// itself. \return the closest enclosing block starting from "this", which
  /// has successors. \return the root enclosing block if all enclosing blocks
  /// have no successors.
  VPBlockBase *getEnclosingBlockWithSuccessors();

  /// \return the closest enclosing block starting from "this", which has
  /// predecessors. \return the root enclosing block if all enclosing blocks
  /// have no predecessors.
  VPBlockBase *getEnclosingBlockWithPredecessors();

  /// \return the successors either attached directly to this VPBlockBase or, if
  /// this VPBlockBase is the exit block of a VPRegionBlock and has no
  /// successors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has successors and return them. If no such
  /// VPRegionBlock exists, return the (empty) successors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalSuccessors() {
    return getEnclosingBlockWithSuccessors()->getSuccessors();
  }

  /// \return the hierarchical successor of this VPBlockBase if it has a single
  /// hierarchical successor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalSuccessor() {
    return getEnclosingBlockWithSuccessors()->getSingleSuccessor();
  }

  /// \return the predecessors either attached directly to this VPBlockBase or,
  /// if this VPBlockBase is the entry block of a VPRegionBlock and has no
  /// predecessors of its own, search recursively for the first enclosing
  /// VPRegionBlock that has predecessors and return them. If no such
  /// VPRegionBlock exists, return the (empty) predecessors of the topmost
  /// VPBlockBase reached.
  const VPBlocksTy &getHierarchicalPredecessors() {
    return getEnclosingBlockWithPredecessors()->getPredecessors();
  }

  /// \return the hierarchical predecessor of this VPBlockBase if it has a
  /// single hierarchical predecessor. Otherwise return a null pointer.
  VPBlockBase *getSingleHierarchicalPredecessor() {
    return getEnclosingBlockWithPredecessors()->getSinglePredecessor();
  }

  /// \return the condition bit selecting the successor.
  VPValue *getCondBit();
  /// \return the condition bit selecting the successor.
  const VPValue *getCondBit() const;
  /// Set the condition bit selecting the successor.
  void setCondBit(VPValue *CV);

  /// \return the block's predicate.
  VPValue *getPredicate();
  /// \return the block's predicate.
  const VPValue *getPredicate() const;
  /// Set the block's predicate.
  void setPredicate(VPValue *Pred);

  /// Set a given VPBlockBase \p Successor as the single successor of this
  /// VPBlockBase. This VPBlockBase is not added as predecessor of \p Successor.
  /// This VPBlockBase must have no successors.
  void setOneSuccessor(VPBlockBase *Successor) {
    assert(Successors.empty() && "Setting one successor when others exist.");
    appendSuccessor(Successor);
  }

  /// Set two given VPBlockBases \p IfTrue and \p IfFalse to be the two
  /// successors of this VPBlockBase. \p Condition is set as the successor
  /// selector. This VPBlockBase is not added as predecessor of \p IfTrue or \p
  /// IfFalse. This VPBlockBase must have no successors.
  void setTwoSuccessors(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
                        VPValue *Condition) {
    assert(Successors.empty() && "Setting two successors when others exist.");
    assert(Condition && "Setting two successors without condition!");
    setCondBit(Condition);
    appendSuccessor(IfTrue);
    appendSuccessor(IfFalse);
  }

  /// Set each VPBasicBlock in \p NewPreds as predecessor of this VPBlockBase.
  /// This VPBlockBase must have no predecessors. This VPBlockBase is not added
  /// as successor of any VPBasicBlock in \p NewPreds.
  void setPredecessors(ArrayRef<VPBlockBase *> NewPreds) {
    assert(Predecessors.empty() && "Block predecessors already set.");
    for (auto *Pred : NewPreds)
      appendPredecessor(Pred);
  }

  /// Remove all the predecessors of this block.
  void clearPredecessors() { Predecessors.clear(); }

  /// Remove all the successors of this block and set its condition bit to
  /// null.
  void clearSuccessors() {
    Successors.clear();
    setCondBit(nullptr);
  }

  /// The method which generates the output IR that correspond to this
  /// VPBlockBase, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState *State) = 0;

  /// Delete all blocks reachable from a given VPBlockBase, inclusive.
  static void deleteCFG(VPBlockBase *Entry);

  /// Return true if it is legal to hoist instructions into this block.
  bool isLegalToHoistInto() {
    // There are currently no constraints that prevent an instruction to be
    // hoisted into a VPBlockBase.
    return true;
  }

  /// Replace all operands of VPUsers in the block with \p NewValue and also
  /// replaces all uses of VPValues defined in the block with NewValue.
  virtual void dropAllReferences(VPValue *NewValue) = 0;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this block's name as its operand representation.
  void printAsOperand(raw_ostream &OS, bool PrintType) const {
    OS << getName();
  }

  /// Print plain-text dump of this VPBlockBase to \p O, prefixing all lines
  /// with \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  virtual void print(raw_ostream &O, const Twine &Indent,
                     VPSlotTracker &SlotTracker) const = 0;

  /// Print plain-text dump of this VPBlockBase to \p O.
  void print(raw_ostream &O) const {
    VPSlotTracker SlotTracker(getPlan());
    print(O, "", SlotTracker);
  }

  /// Print the successors of this block to \p O, prefixing all lines with \p
  /// Indent.
  void printSuccessors(raw_ostream &O, const Twine &Indent) const;

  /// Dump this VPBlockBase to dbgs().
  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
658 
/// VPRecipeBase is a base class modeling a sequence of one or more output IR
/// instructions. VPRecipeBase owns the VPValues it defines through VPDef
/// and is responsible for deleting its defined values. Single-value
/// VPRecipeBases that also inherit from VPValue must make sure to inherit from
/// VPRecipeBase before VPValue.
class VPRecipeBase : public ilist_node_with_parent<VPRecipeBase, VPBasicBlock>,
                     public VPDef,
                     public VPUser {
  friend VPBasicBlock;
  friend class VPBlockUtils;

  /// Each VPRecipe belongs to a single VPBasicBlock.
  VPBasicBlock *Parent = nullptr;

public:
  /// Construct a recipe with subclass ID \p SC and the given \p Operands.
  VPRecipeBase(const unsigned char SC, ArrayRef<VPValue *> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}

  /// Construct a recipe with subclass ID \p SC from an iterator range of
  /// operands.
  template <typename IterT>
  VPRecipeBase(const unsigned char SC, iterator_range<IterT> Operands)
      : VPDef(SC), VPUser(Operands, VPUser::VPUserID::Recipe) {}
  virtual ~VPRecipeBase() = default;

  /// \return the VPBasicBlock which this VPRecipe belongs to.
  VPBasicBlock *getParent() { return Parent; }
  const VPBasicBlock *getParent() const { return Parent; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRecipe, thereby "executing" the VPlan.
  virtual void execute(struct VPTransformState &State) = 0;

  /// Insert an unlinked recipe into a basic block immediately before
  /// the specified recipe.
  void insertBefore(VPRecipeBase *InsertPos);

  /// Insert an unlinked Recipe into a basic block immediately after
  /// the specified Recipe.
  void insertAfter(VPRecipeBase *InsertPos);

  /// Unlink this recipe from its current VPBasicBlock and insert it into
  /// the VPBasicBlock that MovePos lives in, right after MovePos.
  void moveAfter(VPRecipeBase *MovePos);

  /// Unlink this recipe and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(VPBasicBlock &BB, iplist<VPRecipeBase>::iterator I);

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  iplist<VPRecipeBase>::iterator eraseFromParent();

  /// Returns the underlying instruction, if the recipe is a VPValue or nullptr
  /// otherwise.
  Instruction *getUnderlyingInstr() {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }
  const Instruction *getUnderlyingInstr() const {
    return cast<Instruction>(getVPSingleValue()->getUnderlyingValue());
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    // All VPDefs are also VPRecipeBases.
    return true;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPUser *U) {
    return U->getVPUserID() == VPUser::VPUserID::Recipe;
  }

  /// Returns true if the recipe may have side-effects.
  bool mayHaveSideEffects() const;

  /// Returns true for PHI-like recipes.
  bool isPhi() const {
    return getVPDefID() >= VPFirstPHISC && getVPDefID() <= VPLastPHISC;
  }

  /// Returns true if the recipe may read from memory.
  bool mayReadFromMemory() const;

  /// Returns true if the recipe may write to memory.
  bool mayWriteToMemory() const;

  /// Returns true if the recipe may read from or write to memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }
};
754 
755 inline bool VPUser::classof(const VPDef *Def) {
756   return Def->getVPDefID() == VPRecipeBase::VPInstructionSC ||
757          Def->getVPDefID() == VPRecipeBase::VPWidenSC ||
758          Def->getVPDefID() == VPRecipeBase::VPWidenCallSC ||
759          Def->getVPDefID() == VPRecipeBase::VPWidenSelectSC ||
760          Def->getVPDefID() == VPRecipeBase::VPWidenGEPSC ||
761          Def->getVPDefID() == VPRecipeBase::VPBlendSC ||
762          Def->getVPDefID() == VPRecipeBase::VPInterleaveSC ||
763          Def->getVPDefID() == VPRecipeBase::VPReplicateSC ||
764          Def->getVPDefID() == VPRecipeBase::VPReductionSC ||
765          Def->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC ||
766          Def->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
767 }
768 
/// This is a concrete Recipe that models a single VPlan-level instruction.
/// While as any Recipe it may generate a sequence of IR instructions when
/// executed, these instructions would always form a single-def expression as
/// the VPInstruction is also a single def-use vertex.
class VPInstruction : public VPRecipeBase, public VPValue {
  friend class VPlanSlp;

public:
  /// VPlan opcodes, extending LLVM IR with idiomatic instructions.
  enum {
    FirstOrderRecurrenceSplice =
        Instruction::OtherOpsEnd + 1, // Combines the incoming and previous
                                      // values of a first-order recurrence.
    Not,
    ICmpULE,
    SLPLoad,
    SLPStore,
    ActiveLaneMask,
  };

private:
  typedef unsigned char OpcodeTy;
  // Either an LLVM IR opcode or one of the VPlan-specific opcodes above.
  OpcodeTy Opcode;

  /// Utility method serving execute(): generates a single instance of the
  /// modeled instruction.
  void generateInstruction(VPTransformState &State, unsigned Part);

protected:
  void setUnderlyingInstr(Instruction *I) { setUnderlyingValue(I); }

public:
  /// Create a VPInstruction with \p Opcode over \p Operands; the underlying
  /// value of the defined VPValue is left unset (nullptr).
  VPInstruction(unsigned Opcode, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, Operands),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {}

  /// Create a VPInstruction whose operands are the single defined VPValues of
  /// the given VPInstructions.
  VPInstruction(unsigned Opcode, ArrayRef<VPInstruction *> Operands)
      : VPRecipeBase(VPRecipeBase::VPInstructionSC, {}),
        VPValue(VPValue::VPVInstructionSC, nullptr, this), Opcode(Opcode) {
    for (auto *I : Operands)
      addOperand(I->getVPSingleValue());
  }

  VPInstruction(unsigned Opcode, std::initializer_list<VPValue *> Operands)
      : VPInstruction(Opcode, ArrayRef<VPValue *>(Operands)) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVInstructionSC;
  }

  /// Create a fresh VPInstruction with the same opcode and operands; the
  /// clone's underlying value is not set (see the first constructor).
  VPInstruction *clone() const {
    SmallVector<VPValue *, 2> Operands(operands());
    return new VPInstruction(Opcode, Operands);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *R) {
    return R->getVPDefID() == VPRecipeBase::VPInstructionSC;
  }

  unsigned getOpcode() const { return Opcode; }

  /// Generate the instruction.
  /// TODO: We currently execute only per-part unless a specific instance is
  /// provided.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the VPInstruction to \p O.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;

  /// Print the VPInstruction to dbgs() (for debugging).
  LLVM_DUMP_METHOD void dump() const;
#endif

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const {
    // TODO: we can use attributes of the called function to rule out memory
    //       modifications.
    return Opcode == Instruction::Store || Opcode == Instruction::Call ||
           Opcode == Instruction::Invoke || Opcode == SLPStore;
  }

  /// Returns true if the modeled instruction produces a value; opcodes that
  /// only have side-effects or affect control flow return false.
  bool hasResult() const {
    // CallInst may or may not have a result, depending on the called function.
    // Conservatively assume calls have results for now.
    switch (getOpcode()) {
    case Instruction::Ret:
    case Instruction::Br:
    case Instruction::Store:
    case Instruction::Switch:
    case Instruction::IndirectBr:
    case Instruction::Resume:
    case Instruction::CatchRet:
    case Instruction::Unreachable:
    case Instruction::Fence:
    case Instruction::AtomicRMW:
      return false;
    default:
      return true;
    }
  }
};
874 
/// VPWidenRecipe is a recipe for producing a widened (vector-typed) copy of
/// its ingredient. This recipe covers most of the traditional vectorization
/// cases where each ingredient transforms into a vectorized version of itself.
class VPWidenRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create a widening recipe for ingredient \p I using \p Operands as the
  /// recipe's operands.
  template <typename IterT>
  VPWidenRecipe(Instruction &I, iterator_range<IterT> Operands)
      : VPRecipeBase(VPRecipeBase::VPWidenSC, Operands),
        VPValue(VPValue::VPVWidenSC, &I, this) {}

  ~VPWidenRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenSC;
  }

  /// Produce widened copies of all Ingredients.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
904 
/// A recipe for widening Call instructions. The recipe's operands are the
/// call's arguments; the call itself is the underlying ingredient \p I.
class VPWidenCallRecipe : public VPRecipeBase, public VPValue {

public:
  template <typename IterT>
  VPWidenCallRecipe(CallInst &I, iterator_range<IterT> CallArguments)
      : VPRecipeBase(VPRecipeBase::VPWidenCallSC, CallArguments),
        VPValue(VPValue::VPVWidenCallSC, &I, this) {}

  ~VPWidenCallRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCallSC;
  }

  /// Produce a widened version of the call instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
930 
/// A recipe for widening select instructions.
class VPWidenSelectRecipe : public VPRecipeBase, public VPValue {

  /// Is the condition of the select loop invariant?
  bool InvariantCond;

public:
  /// Create a recipe widening select \p I, with \p Operands being
  /// {Cond, TrueVal, FalseVal}. \p InvariantCond records whether the select's
  /// condition is loop invariant (so a scalar condition can be used).
  template <typename IterT>
  VPWidenSelectRecipe(SelectInst &I, iterator_range<IterT> Operands,
                      bool InvariantCond)
      : VPRecipeBase(VPRecipeBase::VPWidenSelectSC, Operands),
        VPValue(VPValue::VPVWidenSelectSC, &I, this),
        InvariantCond(InvariantCond) {}

  ~VPWidenSelectRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenSelectSC;
  }

  /// Produce a widened version of the select instruction.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
961 
962 /// A recipe for handling GEP instructions.
963 class VPWidenGEPRecipe : public VPRecipeBase, public VPValue {
964   bool IsPtrLoopInvariant;
965   SmallBitVector IsIndexLoopInvariant;
966 
967 public:
968   template <typename IterT>
969   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands)
970       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
971         VPValue(VPWidenGEPSC, GEP, this),
972         IsIndexLoopInvariant(GEP->getNumIndices(), false) {}
973 
974   template <typename IterT>
975   VPWidenGEPRecipe(GetElementPtrInst *GEP, iterator_range<IterT> Operands,
976                    Loop *OrigLoop)
977       : VPRecipeBase(VPRecipeBase::VPWidenGEPSC, Operands),
978         VPValue(VPValue::VPVWidenGEPSC, GEP, this),
979         IsIndexLoopInvariant(GEP->getNumIndices(), false) {
980     IsPtrLoopInvariant = OrigLoop->isLoopInvariant(GEP->getPointerOperand());
981     for (auto Index : enumerate(GEP->indices()))
982       IsIndexLoopInvariant[Index.index()] =
983           OrigLoop->isLoopInvariant(Index.value().get());
984   }
985   ~VPWidenGEPRecipe() override = default;
986 
987   /// Method to support type inquiry through isa, cast, and dyn_cast.
988   static inline bool classof(const VPDef *D) {
989     return D->getVPDefID() == VPRecipeBase::VPWidenGEPSC;
990   }
991 
992   /// Generate the gep nodes.
993   void execute(VPTransformState &State) override;
994 
995 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
996   /// Print the recipe.
997   void print(raw_ostream &O, const Twine &Indent,
998              VPSlotTracker &SlotTracker) const override;
999 #endif
1000 };
1001 
/// A recipe for handling phi nodes of integer and floating-point inductions,
/// producing their vector and scalar values.
class VPWidenIntOrFpInductionRecipe : public VPRecipeBase {
  /// The induction phi from the original (scalar) loop.
  PHINode *IV;

public:
  /// Create a recipe for induction \p IV with start value \p Start. The first
  /// defined VPValue corresponds to \p Trunc if one is given, otherwise to
  /// \p IV; if \p Cast is given, a second VPValue is defined for it.
  VPWidenIntOrFpInductionRecipe(PHINode *IV, VPValue *Start, Instruction *Cast,
                                TruncInst *Trunc = nullptr)
      : VPRecipeBase(VPWidenIntOrFpInductionSC, {Start}), IV(IV) {
    if (Trunc)
      new VPValue(Trunc, this);
    else
      new VPValue(IV, this);

    if (Cast)
      new VPValue(Cast, this);
  }
  ~VPWidenIntOrFpInductionRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenIntOrFpInductionSC;
  }

  /// Generate the vectorized and scalarized versions of the phi node as
  /// needed by their users.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the induction.
  VPValue *getStartValue() { return getOperand(0); }

  /// Returns the cast VPValue, if one is attached, or nullptr otherwise.
  VPValue *getCastValue() {
    // A cast is present iff two values are defined: the main value plus the
    // cast (see the constructor).
    if (getNumDefinedValues() != 2)
      return nullptr;
    return getVPValue(1);
  }

  /// Returns the first defined value as TruncInst, if it is one or nullptr
  /// otherwise.
  TruncInst *getTruncInst() {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
  const TruncInst *getTruncInst() const {
    return dyn_cast_or_null<TruncInst>(getVPValue(0)->getUnderlyingValue());
  }
};
1055 
/// A recipe for handling first order recurrences and pointer inductions. For
/// first-order recurrences, the start value is the first operand of the recipe
/// and the incoming value from the backedge is the second operand. It also
/// serves as base class for VPReductionPHIRecipe. In the VPlan native path, all
/// incoming VPValues & VPBasicBlock pairs are managed in the recipe directly.
class VPWidenPHIRecipe : public VPRecipeBase, public VPValue {
  /// List of incoming blocks. Only used in the VPlan native path.
  SmallVector<VPBasicBlock *, 2> IncomingBlocks;

protected:
  /// Create a recipe for \p Phi with value ID \p VPVID and recipe ID
  /// \p VPDefID, optionally adding \p Start as the first operand. Used by the
  /// derived first-order-recurrence and reduction phi recipes.
  VPWidenPHIRecipe(unsigned char VPVID, unsigned char VPDefID, PHINode *Phi,
                   VPValue *Start = nullptr)
      : VPRecipeBase(VPDefID, {}), VPValue(VPVID, Phi, this) {
    if (Start)
      addOperand(Start);
  }

public:
  /// Create a VPWidenPHIRecipe for \p Phi
  VPWidenPHIRecipe(PHINode *Phi)
      : VPWidenPHIRecipe(VPVWidenPHISC, VPWidenPHISC, Phi) {}

  /// Create a new VPWidenPHIRecipe for \p Phi with start value \p Start.
  VPWidenPHIRecipe(PHINode *Phi, VPValue &Start) : VPWidenPHIRecipe(Phi) {
    addOperand(&Start);
  }

  ~VPWidenPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  /// Matches this class and both derived phi recipe kinds.
  static inline bool classof(const VPRecipeBase *B) {
    return B->getVPDefID() == VPRecipeBase::VPWidenPHISC ||
           B->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC ||
           B->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVWidenPHISC ||
           V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC ||
           V->getVPValueID() == VPValue::VPVReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the start value of the phi, if it is a reduction or first-order
  /// recurrence.
  VPValue *getStartValue() {
    return getNumOperands() == 0 ? nullptr : getOperand(0);
  }

  /// Returns the incoming value from the loop backedge, if it is a reduction or
  /// first-order recurrence. Assumes operand 1 exists; only valid for those
  /// phi kinds.
  VPValue *getBackedgeValue() {
    return getOperand(1);
  }

  /// Returns the backedge value as a recipe. The backedge value is guaranteed
  /// to be a recipe.
  VPRecipeBase *getBackedgeRecipe() {
    return cast<VPRecipeBase>(getBackedgeValue()->getDef());
  }

  /// Adds a pair (\p IncomingV, \p IncomingBlock) to the phi.
  void addIncoming(VPValue *IncomingV, VPBasicBlock *IncomingBlock) {
    addOperand(IncomingV);
    IncomingBlocks.push_back(IncomingBlock);
  }

  /// Returns the \p I th incoming VPValue.
  VPValue *getIncomingValue(unsigned I) { return getOperand(I); }

  /// Returns the \p I th incoming VPBasicBlock.
  VPBasicBlock *getIncomingBlock(unsigned I) { return IncomingBlocks[I]; }
};
1136 
/// A recipe for handling first-order recurrence phis. The start value is the
/// first operand of the recipe and the incoming value from the backedge is the
/// second operand.
struct VPFirstOrderRecurrencePHIRecipe : public VPWidenPHIRecipe {
  VPFirstOrderRecurrencePHIRecipe(PHINode *Phi, VPValue &Start)
      : VPWidenPHIRecipe(VPVFirstOrderRecurrencePHISC,
                         VPFirstOrderRecurrencePHISC, Phi, &Start) {}

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPWidenPHIRecipe *D) {
    return D->getVPDefID() == VPRecipeBase::VPFirstOrderRecurrencePHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVFirstOrderRecurrencePHISC;
  }

  /// Generate the phi for the recurrence.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1164 
/// A recipe for handling reduction phis. The start value is the first operand
/// of the recipe and the incoming value from the backedge is the second
/// operand.
class VPReductionPHIRecipe : public VPWidenPHIRecipe {
  /// Descriptor for the reduction.
  RecurrenceDescriptor &RdxDesc;

  /// The phi is part of an in-loop reduction.
  bool IsInLoop;

  /// The phi is part of an ordered reduction. Requires IsInLoop to be true.
  bool IsOrdered;

public:
  /// Create a new VPReductionPHIRecipe for the reduction \p Phi described by \p
  /// RdxDesc.
  VPReductionPHIRecipe(PHINode *Phi, RecurrenceDescriptor &RdxDesc,
                       VPValue &Start, bool IsInLoop = false,
                       bool IsOrdered = false)
      : VPWidenPHIRecipe(VPVReductionPHISC, VPReductionPHISC, Phi, &Start),
        RdxDesc(RdxDesc), IsInLoop(IsInLoop), IsOrdered(IsOrdered) {
    assert((!IsOrdered || IsInLoop) && "IsOrdered requires IsInLoop");
  }

  ~VPReductionPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPRecipeBase *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }
  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReductionPHISC;
  }
  static inline bool classof(const VPWidenPHIRecipe *R) {
    return R->getVPDefID() == VPRecipeBase::VPReductionPHISC;
  }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Returns the descriptor of the reduction modeled by this phi.
  RecurrenceDescriptor &getRecurrenceDescriptor() { return RdxDesc; }

  /// Returns true, if the phi is part of an ordered reduction.
  bool isOrdered() const { return IsOrdered; }

  /// Returns true, if the phi is part of an in-loop reduction.
  bool isInLoop() const { return IsInLoop; }
};
1219 
/// A recipe for vectorizing a phi-node as a sequence of mask-based select
/// instructions.
class VPBlendRecipe : public VPRecipeBase, public VPValue {
  /// The original phi being blended.
  PHINode *Phi;

public:
  /// The blend operation is a User of the incoming values and of their
  /// respective masks, ordered [I0, M0, I1, M1, ...]. Note that a single value
  /// might be incoming with a full mask for which there is no VPValue.
  VPBlendRecipe(PHINode *Phi, ArrayRef<VPValue *> Operands)
      : VPRecipeBase(VPBlendSC, Operands),
        VPValue(VPValue::VPVBlendSC, Phi, this), Phi(Phi) {
    assert(Operands.size() > 0 &&
           ((Operands.size() == 1) || (Operands.size() % 2 == 0)) &&
           "Expected either a single incoming value or a positive even number "
           "of operands");
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBlendSC;
  }

  /// Return the number of incoming values, taking into account that a single
  /// incoming value has no mask.
  unsigned getNumIncomingValues() const { return (getNumOperands() + 1) / 2; }

  /// Return incoming value number \p Idx.
  VPValue *getIncomingValue(unsigned Idx) const { return getOperand(Idx * 2); }

  /// Return mask number \p Idx.
  VPValue *getMask(unsigned Idx) const { return getOperand(Idx * 2 + 1); }

  /// Generate the phi/select nodes.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1262 
/// VPInterleaveRecipe is a recipe for transforming an interleave group of load
/// or stores into one wide load/store and shuffles. The first operand of a
/// VPInterleave recipe is the address, followed by the stored values, followed
/// by an optional mask.
class VPInterleaveRecipe : public VPRecipeBase {
  /// The interleave group this recipe materializes.
  const InterleaveGroup<Instruction> *IG;

  /// Whether an (optional) mask was added as the last operand.
  bool HasMask = false;

public:
  /// Create a recipe for \p IG. A VPValue is defined for each non-void member
  /// of the group; operands are laid out as [Addr, StoredValues..., Mask?].
  VPInterleaveRecipe(const InterleaveGroup<Instruction> *IG, VPValue *Addr,
                     ArrayRef<VPValue *> StoredValues, VPValue *Mask)
      : VPRecipeBase(VPInterleaveSC, {Addr}), IG(IG) {
    for (unsigned i = 0; i < IG->getFactor(); ++i)
      if (Instruction *I = IG->getMember(i)) {
        // Void members (e.g. stores) produce no value, so define none.
        if (I->getType()->isVoidTy())
          continue;
        new VPValue(I, this);
      }

    for (auto *SV : StoredValues)
      addOperand(SV);
    if (Mask) {
      HasMask = true;
      addOperand(Mask);
    }
  }
  ~VPInterleaveRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPInterleaveSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand when present.
    return HasMask ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Return the VPValues stored by this interleave group. If it is a load
  /// interleave group, return an empty ArrayRef.
  ArrayRef<VPValue *> getStoredValues() const {
    // The first operand is the address, followed by the stored values, followed
    // by an optional mask.
    return ArrayRef<VPValue *>(op_begin(), getNumOperands())
        .slice(1, getNumStoreOperands());
  }

  /// Generate the wide load or store, and shuffles.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  /// Return the interleave group this recipe was created for.
  const InterleaveGroup<Instruction> *getInterleaveGroup() { return IG; }

  /// Returns the number of stored operands of this interleave group. Returns 0
  /// for load interleave groups.
  unsigned getNumStoreOperands() const {
    // Subtract the address and, if present, the mask.
    return getNumOperands() - (HasMask ? 2 : 1);
  }
};
1335 
1336 /// A recipe to represent inloop reduction operations, performing a reduction on
1337 /// a vector operand into a scalar value, and adding the result to a chain.
1338 /// The Operands are {ChainOp, VecOp, [Condition]}.
1339 class VPReductionRecipe : public VPRecipeBase, public VPValue {
1340   /// The recurrence decriptor for the reduction in question.
1341   RecurrenceDescriptor *RdxDesc;
1342   /// Pointer to the TTI, needed to create the target reduction
1343   const TargetTransformInfo *TTI;
1344 
1345 public:
1346   VPReductionRecipe(RecurrenceDescriptor *R, Instruction *I, VPValue *ChainOp,
1347                     VPValue *VecOp, VPValue *CondOp,
1348                     const TargetTransformInfo *TTI)
1349       : VPRecipeBase(VPRecipeBase::VPReductionSC, {ChainOp, VecOp}),
1350         VPValue(VPValue::VPVReductionSC, I, this), RdxDesc(R), TTI(TTI) {
1351     if (CondOp)
1352       addOperand(CondOp);
1353   }
1354 
1355   ~VPReductionRecipe() override = default;
1356 
1357   /// Method to support type inquiry through isa, cast, and dyn_cast.
1358   static inline bool classof(const VPValue *V) {
1359     return V->getVPValueID() == VPValue::VPVReductionSC;
1360   }
1361 
1362   /// Generate the reduction in the loop
1363   void execute(VPTransformState &State) override;
1364 
1365 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1366   /// Print the recipe.
1367   void print(raw_ostream &O, const Twine &Indent,
1368              VPSlotTracker &SlotTracker) const override;
1369 #endif
1370 
1371   /// The VPValue of the scalar Chain being accumulated.
1372   VPValue *getChainOp() const { return getOperand(0); }
1373   /// The VPValue of the vector value to be reduced.
1374   VPValue *getVecOp() const { return getOperand(1); }
1375   /// The VPValue of the condition for the block.
1376   VPValue *getCondOp() const {
1377     return getNumOperands() > 2 ? getOperand(2) : nullptr;
1378   }
1379 };
1380 
/// VPReplicateRecipe replicates a given instruction producing multiple scalar
/// copies of the original scalar type, one per lane, instead of producing a
/// single copy of widened type for all lanes. If the instruction is known to be
/// uniform only one copy, per lane zero, will be generated.
class VPReplicateRecipe : public VPRecipeBase, public VPValue {
  /// Indicator if only a single replica per lane is needed.
  bool IsUniform;

  /// Indicator if the replicas are also predicated.
  bool IsPredicated;

  /// Indicator if the scalar values should also be packed into a vector.
  bool AlsoPack;

public:
  template <typename IterT>
  VPReplicateRecipe(Instruction *I, iterator_range<IterT> Operands,
                    bool IsUniform, bool IsPredicated = false)
      : VPRecipeBase(VPReplicateSC, Operands), VPValue(VPVReplicateSC, I, this),
        IsUniform(IsUniform), IsPredicated(IsPredicated) {
    // Retain the previous behavior of predicateInstructions(), where an
    // insert-element of a predicated instruction got hoisted into the
    // predicated basic block iff it was its only user. This is achieved by
    // having predicated instructions also pack their values into a vector by
    // default unless they have a replicated user which uses their scalar value.
    AlsoPack = IsPredicated && !I->use_empty();
  }

  ~VPReplicateRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPReplicateSC;
  }

  static inline bool classof(const VPValue *V) {
    return V->getVPValueID() == VPValue::VPVReplicateSC;
  }

  /// Generate replicas of the desired Ingredient. Replicas will be generated
  /// for all parts and lanes unless a specific part and lane are specified in
  /// the \p State.
  void execute(VPTransformState &State) override;

  /// Set whether the scalar values should also be packed into a vector.
  void setAlsoPack(bool Pack) { AlsoPack = Pack; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif

  bool isUniform() const { return IsUniform; }

  bool isPacked() const { return AlsoPack; }

  bool isPredicated() const { return IsPredicated; }
};
1439 
/// A recipe for generating conditional branches on the bits of a mask.
class VPBranchOnMaskRecipe : public VPRecipeBase {
public:
  VPBranchOnMaskRecipe(VPValue *BlockInMask)
      : VPRecipeBase(VPBranchOnMaskSC, {}) {
    if (BlockInMask) // nullptr means all-one mask.
      addOperand(BlockInMask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPBranchOnMaskSC;
  }

  /// Generate the extraction of the appropriate bit from the block mask and the
  /// conditional branch.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override {
    O << Indent << "BRANCH-ON-MASK ";
    if (VPValue *Mask = getMask())
      Mask->printAsOperand(O, SlotTracker);
    else
      O << " All-One";
  }
#endif

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    assert(getNumOperands() <= 1 && "should have either 0 or 1 operands");
    // Mask is optional.
    return getNumOperands() == 1 ? getOperand(0) : nullptr;
  }
};
1478 
/// VPPredInstPHIRecipe is a recipe for generating the phi nodes needed when
/// control converges back from a Branch-on-Mask. The phi nodes are needed in
/// order to merge values that are set under such a branch and feed their uses.
/// The phi nodes can be scalar or vector depending on the users of the value.
/// This recipe works in concert with VPBranchOnMaskRecipe.
class VPPredInstPHIRecipe : public VPRecipeBase, public VPValue {
public:
  /// Construct a VPPredInstPHIRecipe given \p PredV, the predicated value
  /// whose uses need phi nodes after merging back from a Branch-on-Mask.
  VPPredInstPHIRecipe(VPValue *PredV)
      : VPRecipeBase(VPPredInstPHISC, PredV),
        VPValue(VPValue::VPVPredInstPHI, nullptr, this) {}
  ~VPPredInstPHIRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPPredInstPHISC;
  }

  /// Generates phi nodes for live-outs as needed to retain SSA form.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1507 
/// A Recipe for widening load/store operations.
/// The recipe uses the following VPValues:
/// - For load: Address, optional mask
/// - For store: Address, stored value, optional mask
/// TODO: We currently execute only per-part unless a specific instance is
/// provided.
class VPWidenMemoryInstructionRecipe : public VPRecipeBase {
  /// The original load or store instruction.
  Instruction &Ingredient;

  // Whether the loaded-from / stored-to addresses are consecutive.
  bool Consecutive;

  // Whether the consecutive loaded/stored addresses are in reverse order.
  bool Reverse;

  /// Append \p Mask as the last operand; a nullptr mask (all-one) adds
  /// nothing.
  void setMask(VPValue *Mask) {
    if (!Mask)
      return;
    addOperand(Mask);
  }

  /// A mask is present iff there is one operand beyond the mandatory ones
  /// (store: {Addr, StoredValue}, load: {Addr}).
  bool isMasked() const {
    return isStore() ? getNumOperands() == 3 : getNumOperands() == 2;
  }

public:
  VPWidenMemoryInstructionRecipe(LoadInst &Load, VPValue *Addr, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr}), Ingredient(Load),
        Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    new VPValue(VPValue::VPVMemoryInstructionSC, &Load, this);
    setMask(Mask);
  }

  VPWidenMemoryInstructionRecipe(StoreInst &Store, VPValue *Addr,
                                 VPValue *StoredValue, VPValue *Mask,
                                 bool Consecutive, bool Reverse)
      : VPRecipeBase(VPWidenMemoryInstructionSC, {Addr, StoredValue}),
        Ingredient(Store), Consecutive(Consecutive), Reverse(Reverse) {
    assert((Consecutive || !Reverse) && "Reverse implies consecutive");
    setMask(Mask);
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenMemoryInstructionSC;
  }

  /// Return the address accessed by this recipe.
  VPValue *getAddr() const {
    return getOperand(0); // Address is the 1st, mandatory operand.
  }

  /// Return the mask used by this recipe. Note that a full mask is represented
  /// by a nullptr.
  VPValue *getMask() const {
    // Mask is optional and therefore the last operand.
    return isMasked() ? getOperand(getNumOperands() - 1) : nullptr;
  }

  /// Returns true if this recipe is a store.
  bool isStore() const { return isa<StoreInst>(Ingredient); }

  /// Return the value stored by this recipe.
  VPValue *getStoredValue() const {
    assert(isStore() && "Stored value only available for store instructions");
    return getOperand(1); // Stored value is the 2nd, mandatory operand.
  }

  // Return whether the loaded-from / stored-to addresses are consecutive.
  bool isConsecutive() const { return Consecutive; }

  // Return whether the consecutive loaded/stored addresses are in reverse
  // order.
  bool isReverse() const { return Reverse; }

  /// Generate the wide load/store.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1594 
1595 /// A Recipe for widening the canonical induction variable of the vector loop.
class VPWidenCanonicalIVRecipe : public VPRecipeBase, public VPValue {
public:
  /// Create the recipe. It takes no operands and itself defines the single
  /// VPValue representing the widened canonical IV.
  VPWidenCanonicalIVRecipe()
      : VPRecipeBase(VPWidenCanonicalIVSC, {}),
        VPValue(VPValue::VPVWidenCanonicalIVSC, nullptr, this) {}

  ~VPWidenCanonicalIVRecipe() override = default;

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPDef *D) {
    return D->getVPDefID() == VPRecipeBase::VPWidenCanonicalIVSC;
  }

  /// Generate a canonical vector induction variable of the vector loop, with
  /// start = {<Part*VF, Part*VF+1, ..., Part*VF+VF-1> for 0 <= Part < UF}, and
  /// step = <VF*UF, VF*UF, ..., VF*UF>.
  void execute(VPTransformState &State) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the recipe.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
#endif
};
1620 
1621 /// VPBasicBlock serves as the leaf of the Hierarchical Control-Flow Graph. It
1622 /// holds a sequence of zero or more VPRecipe's each representing a sequence of
1623 /// output IR instructions. All PHI-like recipes must come before any non-PHI recipes.
class VPBasicBlock : public VPBlockBase {
public:
  using RecipeListTy = iplist<VPRecipeBase>;

private:
  /// The VPRecipes held in the order of output instructions to generate.
  RecipeListTy Recipes;

public:
  /// Create a VPBasicBlock, optionally seeded with a single initial \p Recipe.
  VPBasicBlock(const Twine &Name = "", VPRecipeBase *Recipe = nullptr)
      : VPBlockBase(VPBasicBlockSC, Name.str()) {
    if (Recipe)
      appendRecipe(Recipe);
  }

  ~VPBasicBlock() override {
    // The block owns its recipes via the intrusive list; drain the list so
    // each recipe is disposed of on removal.
    while (!Recipes.empty())
      Recipes.pop_back();
  }

  /// Instruction iterators...
  using iterator = RecipeListTy::iterator;
  using const_iterator = RecipeListTy::const_iterator;
  using reverse_iterator = RecipeListTy::reverse_iterator;
  using const_reverse_iterator = RecipeListTy::const_reverse_iterator;

  //===--------------------------------------------------------------------===//
  /// Recipe iterator methods
  ///
  inline iterator begin() { return Recipes.begin(); }
  inline const_iterator begin() const { return Recipes.begin(); }
  inline iterator end() { return Recipes.end(); }
  inline const_iterator end() const { return Recipes.end(); }

  inline reverse_iterator rbegin() { return Recipes.rbegin(); }
  inline const_reverse_iterator rbegin() const { return Recipes.rbegin(); }
  inline reverse_iterator rend() { return Recipes.rend(); }
  inline const_reverse_iterator rend() const { return Recipes.rend(); }

  inline size_t size() const { return Recipes.size(); }
  inline bool empty() const { return Recipes.empty(); }
  inline const VPRecipeBase &front() const { return Recipes.front(); }
  inline VPRecipeBase &front() { return Recipes.front(); }
  inline const VPRecipeBase &back() const { return Recipes.back(); }
  inline VPRecipeBase &back() { return Recipes.back(); }

  /// Returns a reference to the list of recipes.
  RecipeListTy &getRecipeList() { return Recipes; }

  /// Returns a pointer to a member of the recipe list.
  static RecipeListTy VPBasicBlock::*getSublistAccess(VPRecipeBase *) {
    return &VPBasicBlock::Recipes;
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPBasicBlockSC;
  }

  /// Insert \p Recipe before \p InsertPt, taking ownership of it and setting
  /// this block as its parent. \p Recipe must not already belong to a block.
  void insert(VPRecipeBase *Recipe, iterator InsertPt) {
    assert(Recipe && "No recipe to append.");
    assert(!Recipe->Parent && "Recipe already in VPlan");
    Recipe->Parent = this;
    Recipes.insert(InsertPt, Recipe);
  }

  /// Augment the existing recipes of a VPBasicBlock with an additional
  /// \p Recipe as the last recipe.
  void appendRecipe(VPRecipeBase *Recipe) { insert(Recipe, end()); }

  /// The method which generates the output IR instructions that correspond to
  /// this VPBasicBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  /// Return the position of the first non-phi node recipe in the block.
  iterator getFirstNonPhi();

  /// Returns an iterator range over the PHI-like recipes in the block.
  iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPhi());
  }

  void dropAllReferences(VPValue *NewValue) override;

  /// Split current block at \p SplitAt by inserting a new block between the
  /// current block and its successors and moving all recipes starting at
  /// SplitAt to the new block. Returns the new block.
  VPBasicBlock *splitAt(iterator SplitAt);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPBasicBlock to \p O, prefixing all lines with \p Indent. \p
  /// SlotTracker is used to print unnamed VPValue's using consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual blocks is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif

private:
  /// Create an IR BasicBlock to hold the output instructions generated by this
  /// VPBasicBlock, and return it. Update the CFGState accordingly.
  BasicBlock *createEmptyBasicBlock(VPTransformState::CFGState &CFG);
};
1729 
1730 /// VPRegionBlock represents a collection of VPBasicBlocks and VPRegionBlocks
1731 /// which form a Single-Entry-Single-Exit subgraph of the output IR CFG.
1732 /// A VPRegionBlock may indicate that its contents are to be replicated several
1733 /// times. This is designed to support predicated scalarization, in which a
1734 /// scalar if-then code structure needs to be generated VF * UF times. Having
1735 /// this replication indicator helps to keep a single model for multiple
1736 /// candidate VF's. The actual replication takes place only once the desired VF
1737 /// and UF have been determined.
class VPRegionBlock : public VPBlockBase {
  /// Hold the Single Entry of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Entry;

  /// Hold the Single Exit of the SESE region modelled by the VPRegionBlock.
  VPBlockBase *Exit;

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool IsReplicator;

public:
  /// Create a region with the given \p Entry and \p Exit blocks, adopting
  /// both as children of this region.
  VPRegionBlock(VPBlockBase *Entry, VPBlockBase *Exit,
                const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(Entry), Exit(Exit),
        IsReplicator(IsReplicator) {
    assert(Entry->getPredecessors().empty() && "Entry block has predecessors.");
    assert(Exit->getSuccessors().empty() && "Exit block has successors.");
    Entry->setParent(this);
    Exit->setParent(this);
  }
  /// Create an empty region; entry and exit are set later via setEntry() and
  /// setExit().
  VPRegionBlock(const std::string &Name = "", bool IsReplicator = false)
      : VPBlockBase(VPRegionBlockSC, Name), Entry(nullptr), Exit(nullptr),
        IsReplicator(IsReplicator) {}

  ~VPRegionBlock() override {
    if (Entry) {
      // Drop all references held inside the region before deleting its CFG,
      // so no block is destroyed while still referenced.
      VPValue DummyValue;
      Entry->dropAllReferences(&DummyValue);
      deleteCFG(Entry);
    }
  }

  /// Method to support type inquiry through isa, cast, and dyn_cast.
  static inline bool classof(const VPBlockBase *V) {
    return V->getVPBlockID() == VPBlockBase::VPRegionBlockSC;
  }

  const VPBlockBase *getEntry() const { return Entry; }
  VPBlockBase *getEntry() { return Entry; }

  /// Set \p EntryBlock as the entry VPBlockBase of this VPRegionBlock. \p
  /// EntryBlock must have no predecessors.
  void setEntry(VPBlockBase *EntryBlock) {
    assert(EntryBlock->getPredecessors().empty() &&
           "Entry block cannot have predecessors.");
    Entry = EntryBlock;
    EntryBlock->setParent(this);
  }

  // FIXME: DominatorTreeBase is doing 'A->getParent()->front()'. 'front' is a
  // specific interface of llvm::Function, instead of using
  // GraphTraits::getEntryNode. We should add a new template parameter to
  // DominatorTreeBase representing the Graph type.
  VPBlockBase &front() const { return *Entry; }

  const VPBlockBase *getExit() const { return Exit; }
  VPBlockBase *getExit() { return Exit; }

  /// Set \p ExitBlock as the exit VPBlockBase of this VPRegionBlock. \p
  /// ExitBlock must have no successors.
  void setExit(VPBlockBase *ExitBlock) {
    assert(ExitBlock->getSuccessors().empty() &&
           "Exit block cannot have successors.");
    Exit = ExitBlock;
    ExitBlock->setParent(this);
  }

  /// An indicator whether this region is to generate multiple replicated
  /// instances of output IR corresponding to its VPBlockBases.
  bool isReplicator() const { return IsReplicator; }

  /// The method which generates the output IR instructions that correspond to
  /// this VPRegionBlock, thereby "executing" the VPlan.
  void execute(struct VPTransformState *State) override;

  void dropAllReferences(VPValue *NewValue) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print this VPRegionBlock to \p O (recursively), prefixing all lines with
  /// \p Indent. \p SlotTracker is used to print unnamed VPValue's using
  /// consecutive numbers.
  ///
  /// Note that the numbering is applied to the whole VPlan, so printing
  /// individual regions is consistent with the whole VPlan printing.
  void print(raw_ostream &O, const Twine &Indent,
             VPSlotTracker &SlotTracker) const override;
  using VPBlockBase::print; // Get the print(raw_stream &O) version.
#endif
};
1828 
1829 //===----------------------------------------------------------------------===//
1830 // GraphTraits specializations for VPlan Hierarchical Control-Flow Graphs     //
1831 //===----------------------------------------------------------------------===//
1832 
1833 // The following set of template specializations implement GraphTraits to treat
1834 // any VPBlockBase as a node in a graph of VPBlockBases. It's important to note
// that VPBlockBase traits don't recurse into VPRegionBlocks, i.e., if the
1836 // VPBlockBase is a VPRegionBlock, this specialization provides access to its
1837 // successors/predecessors but not to the blocks inside the region.
1838 
template <> struct GraphTraits<VPBlockBase *> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  /// In this flat (non-recursive) view, a block is its own entry node.
  static NodeRef getEntryNode(NodeRef N) { return N; }

  /// Children are the block's direct successors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1853 
template <> struct GraphTraits<const VPBlockBase *> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::const_iterator;

  /// Const variant of the flat VPBlockBase traits; a block is its own entry.
  static NodeRef getEntryNode(NodeRef N) { return N; }

  /// Children are the block's direct successors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getSuccessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getSuccessors().end();
  }
};
1868 
1869 // Inverse order specialization for VPBasicBlocks. Predecessors are used instead
1870 // of successors for the inverse traversal.
template <> struct GraphTraits<Inverse<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = SmallVectorImpl<VPBlockBase *>::iterator;

  static NodeRef getEntryNode(Inverse<NodeRef> B) { return B.Graph; }

  /// Children of the inverse graph are the block's predecessors.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return N->getPredecessors().begin();
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return N->getPredecessors().end();
  }
};
1885 
1886 // The following set of template specializations implement GraphTraits to
1887 // treat VPRegionBlock as a graph and recurse inside its nodes. It's important
1888 // to note that the blocks inside the VPRegionBlock are treated as VPBlockBases
1889 // (i.e., no dyn_cast is performed, VPBlockBases specialization is used), so
1890 // there won't be automatic recursion into other VPBlockBases that turn to be
1891 // VPRegionBlocks.
1892 
template <>
struct GraphTraits<VPRegionBlock *> : public GraphTraits<VPBlockBase *> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  /// A region, viewed as a graph, is entered through its entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  /// Depth-first iteration over the blocks inside the region, starting at
  /// its entry.
  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1910 
template <>
struct GraphTraits<const VPRegionBlock *>
    : public GraphTraits<const VPBlockBase *> {
  using GraphRef = const VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  /// Const variant: a region, viewed as a graph, is entered through its
  /// entry block.
  static NodeRef getEntryNode(GraphRef N) { return N->getEntry(); }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getEntry());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1929 
template <>
struct GraphTraits<Inverse<VPRegionBlock *>>
    : public GraphTraits<Inverse<VPBlockBase *>> {
  using GraphRef = VPRegionBlock *;
  using nodes_iterator = df_iterator<NodeRef>;

  /// The inverse traversal of a region starts at its exit block.
  static NodeRef getEntryNode(Inverse<GraphRef> N) {
    return N.Graph->getExit();
  }

  static nodes_iterator nodes_begin(GraphRef N) {
    return nodes_iterator::begin(N->getExit());
  }

  static nodes_iterator nodes_end(GraphRef N) {
    // df_iterator::end() returns an empty iterator so the node used doesn't
    // matter.
    return nodes_iterator::end(N);
  }
};
1950 
1951 /// Iterator to traverse all successors of a VPBlockBase node. This includes the
1952 /// entry node of VPRegionBlocks. Exit blocks of a region implicitly have their
1953 /// parent region's successors. This ensures all blocks in a region are visited
1954 /// before any blocks in a successor region when doing a reverse post-order
/// traversal of the graph.
1956 template <typename BlockPtrTy>
1957 class VPAllSuccessorsIterator
1958     : public iterator_facade_base<VPAllSuccessorsIterator<BlockPtrTy>,
1959                                   std::forward_iterator_tag, VPBlockBase> {
1960   BlockPtrTy Block;
1961   /// Index of the current successor. For VPBasicBlock nodes, this simply is the
1962   /// index for the successor array. For VPRegionBlock, SuccessorIdx == 0 is
1963   /// used for the region's entry block, and SuccessorIdx - 1 are the indices
1964   /// for the successor array.
1965   size_t SuccessorIdx;
1966 
1967   static BlockPtrTy getBlockWithSuccs(BlockPtrTy Current) {
1968     while (Current && Current->getNumSuccessors() == 0)
1969       Current = Current->getParent();
1970     return Current;
1971   }
1972 
1973   /// Templated helper to dereference successor \p SuccIdx of \p Block. Used by
1974   /// both the const and non-const operator* implementations.
1975   template <typename T1> static T1 deref(T1 Block, unsigned SuccIdx) {
1976     if (auto *R = dyn_cast<VPRegionBlock>(Block)) {
1977       if (SuccIdx == 0)
1978         return R->getEntry();
1979       SuccIdx--;
1980     }
1981 
1982     // For exit blocks, use the next parent region with successors.
1983     return getBlockWithSuccs(Block)->getSuccessors()[SuccIdx];
1984   }
1985 
1986 public:
1987   VPAllSuccessorsIterator(BlockPtrTy Block, size_t Idx = 0)
1988       : Block(Block), SuccessorIdx(Idx) {}
1989   VPAllSuccessorsIterator(const VPAllSuccessorsIterator &Other)
1990       : Block(Other.Block), SuccessorIdx(Other.SuccessorIdx) {}
1991 
1992   VPAllSuccessorsIterator &operator=(const VPAllSuccessorsIterator &R) {
1993     Block = R.Block;
1994     SuccessorIdx = R.SuccessorIdx;
1995     return *this;
1996   }
1997 
1998   static VPAllSuccessorsIterator end(BlockPtrTy Block) {
1999     BlockPtrTy ParentWithSuccs = getBlockWithSuccs(Block);
2000     unsigned NumSuccessors = ParentWithSuccs
2001                                  ? ParentWithSuccs->getNumSuccessors()
2002                                  : Block->getNumSuccessors();
2003 
2004     if (auto *R = dyn_cast<VPRegionBlock>(Block))
2005       return {R, NumSuccessors + 1};
2006     return {Block, NumSuccessors};
2007   }
2008 
2009   bool operator==(const VPAllSuccessorsIterator &R) const {
2010     return Block == R.Block && SuccessorIdx == R.SuccessorIdx;
2011   }
2012 
2013   const VPBlockBase *operator*() const { return deref(Block, SuccessorIdx); }
2014 
2015   BlockPtrTy operator*() { return deref(Block, SuccessorIdx); }
2016 
2017   VPAllSuccessorsIterator &operator++() {
2018     SuccessorIdx++;
2019     return *this;
2020   }
2021 
2022   VPAllSuccessorsIterator operator++(int X) {
2023     VPAllSuccessorsIterator Orig = *this;
2024     SuccessorIdx++;
2025     return Orig;
2026   }
2027 };
2028 
2029 /// Helper for GraphTraits specialization that traverses through VPRegionBlocks.
template <typename BlockTy> class VPBlockRecursiveTraversalWrapper {
  /// Entry block of the graph to traverse.
  BlockTy EntryBlock;

public:
  /// Wrap \p EntryBlock as the starting point of a recursive traversal.
  VPBlockRecursiveTraversalWrapper(BlockTy EntryBlock)
      : EntryBlock(EntryBlock) {}

  /// Returns the wrapped entry block.
  BlockTy getEntry() { return EntryBlock; }
};
2037 
2038 /// GraphTraits specialization to recursively traverse VPBlockBase nodes,
2039 /// including traversing through VPRegionBlocks.  Exit blocks of a region
2040 /// implicitly have their parent region's successors. This ensures all blocks in
2041 /// a region are visited before any blocks in a successor region when doing a
2042 /// reverse post-order traversal of the graph.
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<VPBlockBase *>> {
  using NodeRef = VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<VPBlockBase *> N) {
    return N.getEntry();
  }

  /// Children are enumerated via VPAllSuccessorsIterator, which recurses into
  /// region entry blocks.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2061 
template <>
struct GraphTraits<VPBlockRecursiveTraversalWrapper<const VPBlockBase *>> {
  using NodeRef = const VPBlockBase *;
  using ChildIteratorType = VPAllSuccessorsIterator<const VPBlockBase *>;

  static NodeRef
  getEntryNode(VPBlockRecursiveTraversalWrapper<const VPBlockBase *> N) {
    return N.getEntry();
  }

  /// Const variant; children are enumerated via VPAllSuccessorsIterator.
  static inline ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N);
  }

  static inline ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType::end(N);
  }
};
2080 
/// VPlan models a candidate for vectorization, encoding various decisions taken
2082 /// to produce efficient output IR, including which branches, basic-blocks and
2083 /// output IR instructions to generate, and their cost. VPlan holds a
2084 /// Hierarchical-CFG of VPBasicBlocks and VPRegionBlocks rooted at an Entry
2085 /// VPBlock.
2086 class VPlan {
2087   friend class VPlanPrinter;
2088   friend class VPSlotTracker;
2089 
2090   /// Hold the single entry to the Hierarchical CFG of the VPlan.
2091   VPBlockBase *Entry;
2092 
2093   /// Holds the VFs applicable to this VPlan.
2094   SmallSetVector<ElementCount, 2> VFs;
2095 
2096   /// Holds the name of the VPlan, for printing.
2097   std::string Name;
2098 
2099   /// Holds all the external definitions created for this VPlan.
2100   // TODO: Introduce a specific representation for external definitions in
2101   // VPlan. External definitions must be immutable and hold a pointer to its
2102   // underlying IR that will be used to implement its structural comparison
2103   // (operators '==' and '<').
2104   SetVector<VPValue *> VPExternalDefs;
2105 
2106   /// Represents the backedge taken count of the original loop, for folding
2107   /// the tail.
2108   VPValue *BackedgeTakenCount = nullptr;
2109 
2110   /// Holds a mapping between Values and their corresponding VPValue inside
2111   /// VPlan.
2112   Value2VPValueTy Value2VPValue;
2113 
2114   /// Contains all VPValues that been allocated by addVPValue directly and need
2115   /// to be free when the plan's destructor is called.
2116   SmallVector<VPValue *, 16> VPValuesToFree;
2117 
2118   /// Holds the VPLoopInfo analysis for this VPlan.
2119   VPLoopInfo VPLInfo;
2120 
2121   /// Indicates whether it is safe use the Value2VPValue mapping or if the
2122   /// mapping cannot be used any longer, because it is stale.
2123   bool Value2VPValueEnabled = true;
2124 
2125 public:
2126   VPlan(VPBlockBase *Entry = nullptr) : Entry(Entry) {
2127     if (Entry)
2128       Entry->setPlan(this);
2129   }
2130 
2131   ~VPlan() {
2132     if (Entry) {
2133       VPValue DummyValue;
2134       for (VPBlockBase *Block : depth_first(Entry))
2135         Block->dropAllReferences(&DummyValue);
2136 
2137       VPBlockBase::deleteCFG(Entry);
2138     }
2139     for (VPValue *VPV : VPValuesToFree)
2140       delete VPV;
2141     if (BackedgeTakenCount)
2142       delete BackedgeTakenCount;
2143     for (VPValue *Def : VPExternalDefs)
2144       delete Def;
2145   }
2146 
2147   /// Generate the IR code for this VPlan.
2148   void execute(struct VPTransformState *State);
2149 
2150   VPBlockBase *getEntry() { return Entry; }
2151   const VPBlockBase *getEntry() const { return Entry; }
2152 
2153   VPBlockBase *setEntry(VPBlockBase *Block) {
2154     Entry = Block;
2155     Block->setPlan(this);
2156     return Entry;
2157   }
2158 
2159   /// The backedge taken count of the original loop.
2160   VPValue *getOrCreateBackedgeTakenCount() {
2161     if (!BackedgeTakenCount)
2162       BackedgeTakenCount = new VPValue();
2163     return BackedgeTakenCount;
2164   }
2165 
2166   /// Mark the plan to indicate that using Value2VPValue is not safe any
2167   /// longer, because it may be stale.
2168   void disableValue2VPValue() { Value2VPValueEnabled = false; }
2169 
2170   void addVF(ElementCount VF) { VFs.insert(VF); }
2171 
2172   bool hasVF(ElementCount VF) { return VFs.count(VF); }
2173 
2174   const std::string &getName() const { return Name; }
2175 
2176   void setName(const Twine &newName) { Name = newName.str(); }
2177 
2178   /// Add \p VPVal to the pool of external definitions if it's not already
2179   /// in the pool.
2180   void addExternalDef(VPValue *VPVal) { VPExternalDefs.insert(VPVal); }
2181 
2182   void addVPValue(Value *V) {
2183     assert(Value2VPValueEnabled &&
2184            "IR value to VPValue mapping may be out of date!");
2185     assert(V && "Trying to add a null Value to VPlan");
2186     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2187     VPValue *VPV = new VPValue(V);
2188     Value2VPValue[V] = VPV;
2189     VPValuesToFree.push_back(VPV);
2190   }
2191 
2192   void addVPValue(Value *V, VPValue *VPV) {
2193     assert(Value2VPValueEnabled && "Value2VPValue mapping may be out of date!");
2194     assert(V && "Trying to add a null Value to VPlan");
2195     assert(!Value2VPValue.count(V) && "Value already exists in VPlan");
2196     Value2VPValue[V] = VPV;
2197   }
2198 
2199   /// Returns the VPValue for \p V. \p OverrideAllowed can be used to disable
2200   /// checking whether it is safe to query VPValues using IR Values.
2201   VPValue *getVPValue(Value *V, bool OverrideAllowed = false) {
2202     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2203            "Value2VPValue mapping may be out of date!");
2204     assert(V && "Trying to get the VPValue of a null Value");
2205     assert(Value2VPValue.count(V) && "Value does not exist in VPlan");
2206     return Value2VPValue[V];
2207   }
2208 
2209   /// Gets the VPValue or adds a new one (if none exists yet) for \p V. \p
2210   /// OverrideAllowed can be used to disable checking whether it is safe to
2211   /// query VPValues using IR Values.
2212   VPValue *getOrAddVPValue(Value *V, bool OverrideAllowed = false) {
2213     assert((OverrideAllowed || isa<Constant>(V) || Value2VPValueEnabled) &&
2214            "Value2VPValue mapping may be out of date!");
2215     assert(V && "Trying to get or add the VPValue of a null Value");
2216     if (!Value2VPValue.count(V))
2217       addVPValue(V);
2218     return getVPValue(V);
2219   }
2220 
2221   void removeVPValueFor(Value *V) {
2222     assert(Value2VPValueEnabled &&
2223            "IR value to VPValue mapping may be out of date!");
2224     Value2VPValue.erase(V);
2225   }
2226 
2227   /// Return the VPLoopInfo analysis for this VPlan.
2228   VPLoopInfo &getVPLoopInfo() { return VPLInfo; }
2229   const VPLoopInfo &getVPLoopInfo() const { return VPLInfo; }
2230 
2231 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2232   /// Print this VPlan to \p O.
2233   void print(raw_ostream &O) const;
2234 
2235   /// Print this VPlan in DOT format to \p O.
2236   void printDOT(raw_ostream &O) const;
2237 
2238   /// Dump the plan to stderr (for debugging).
2239   LLVM_DUMP_METHOD void dump() const;
2240 #endif
2241 
2242   /// Returns a range mapping the values the range \p Operands to their
2243   /// corresponding VPValues.
2244   iterator_range<mapped_iterator<Use *, std::function<VPValue *(Value *)>>>
2245   mapToVPValues(User::op_range Operands) {
2246     std::function<VPValue *(Value *)> Fn = [this](Value *Op) {
2247       return getOrAddVPValue(Op);
2248     };
2249     return map_range(Operands, Fn);
2250   }
2251 
2252 private:
2253   /// Add to the given dominator tree the header block and every new basic block
2254   /// that was created between it and the latch block, inclusive.
2255   static void updateDominatorTree(DominatorTree *DT, BasicBlock *LoopLatchBB,
2256                                   BasicBlock *LoopPreHeaderBB,
2257                                   BasicBlock *LoopExitBB);
2258 };
2259 
2260 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2261 /// VPlanPrinter prints a given VPlan to a given output stream. The printing is
2262 /// indented and follows the dot format.
2263 class VPlanPrinter {
2264   raw_ostream &OS;
2265   const VPlan &Plan;
2266   unsigned Depth = 0;
2267   unsigned TabWidth = 2;
2268   std::string Indent;
2269   unsigned BID = 0;
2270   SmallDenseMap<const VPBlockBase *, unsigned> BlockID;
2271 
2272   VPSlotTracker SlotTracker;
2273 
2274   /// Handle indentation.
2275   void bumpIndent(int b) { Indent = std::string((Depth += b) * TabWidth, ' '); }
2276 
2277   /// Print a given \p Block of the Plan.
2278   void dumpBlock(const VPBlockBase *Block);
2279 
2280   /// Print the information related to the CFG edges going out of a given
2281   /// \p Block, followed by printing the successor blocks themselves.
2282   void dumpEdges(const VPBlockBase *Block);
2283 
2284   /// Print a given \p BasicBlock, including its VPRecipes, followed by printing
2285   /// its successor blocks.
2286   void dumpBasicBlock(const VPBasicBlock *BasicBlock);
2287 
2288   /// Print a given \p Region of the Plan.
2289   void dumpRegion(const VPRegionBlock *Region);
2290 
2291   unsigned getOrCreateBID(const VPBlockBase *Block) {
2292     return BlockID.count(Block) ? BlockID[Block] : BlockID[Block] = BID++;
2293   }
2294 
2295   Twine getOrCreateName(const VPBlockBase *Block);
2296 
2297   Twine getUID(const VPBlockBase *Block);
2298 
2299   /// Print the information related to a CFG edge between two VPBlockBases.
2300   void drawEdge(const VPBlockBase *From, const VPBlockBase *To, bool Hidden,
2301                 const Twine &Label);
2302 
2303 public:
2304   VPlanPrinter(raw_ostream &O, const VPlan &P)
2305       : OS(O), Plan(P), SlotTracker(&P) {}
2306 
2307   LLVM_DUMP_METHOD void dump();
2308 };
2309 
/// Helper wrapping an IR Value so it can be streamed with VPlan-style
/// printing via operator<<.
struct VPlanIngredient {
  /// The wrapped IR value. Non-owning.
  const Value *V;

  VPlanIngredient(const Value *V) : V(V) {}

  /// Print the wrapped value to \p O.
  void print(raw_ostream &O) const;
};
2317 
/// Stream \p I's textual representation to \p OS and return the stream.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlanIngredient &I) {
  I.print(OS);
  return OS;
}
2322 
/// Stream \p Plan's textual representation to \p OS and return the stream.
inline raw_ostream &operator<<(raw_ostream &OS, const VPlan &Plan) {
  Plan.print(OS);
  return OS;
}
2327 #endif
2328 
2329 //===----------------------------------------------------------------------===//
2330 // VPlan Utilities
2331 //===----------------------------------------------------------------------===//
2332 
2333 /// Class that provides utilities for VPBlockBases in VPlan.
2334 class VPBlockUtils {
2335 public:
2336   VPBlockUtils() = delete;
2337 
2338   /// Insert disconnected VPBlockBase \p NewBlock after \p BlockPtr. Add \p
2339   /// NewBlock as successor of \p BlockPtr and \p BlockPtr as predecessor of \p
2340   /// NewBlock, and propagate \p BlockPtr parent to \p NewBlock. If \p BlockPtr
2341   /// has more than one successor, its conditional bit is propagated to \p
2342   /// NewBlock. \p NewBlock must have neither successors nor predecessors.
2343   static void insertBlockAfter(VPBlockBase *NewBlock, VPBlockBase *BlockPtr) {
2344     assert(NewBlock->getSuccessors().empty() &&
2345            "Can't insert new block with successors.");
2346     // TODO: move successors from BlockPtr to NewBlock when this functionality
2347     // is necessary. For now, setBlockSingleSuccessor will assert if BlockPtr
2348     // already has successors.
2349     BlockPtr->setOneSuccessor(NewBlock);
2350     NewBlock->setPredecessors({BlockPtr});
2351     NewBlock->setParent(BlockPtr->getParent());
2352   }
2353 
2354   /// Insert disconnected VPBlockBases \p IfTrue and \p IfFalse after \p
2355   /// BlockPtr. Add \p IfTrue and \p IfFalse as succesors of \p BlockPtr and \p
2356   /// BlockPtr as predecessor of \p IfTrue and \p IfFalse. Propagate \p BlockPtr
2357   /// parent to \p IfTrue and \p IfFalse. \p Condition is set as the successor
2358   /// selector. \p BlockPtr must have no successors and \p IfTrue and \p IfFalse
2359   /// must have neither successors nor predecessors.
2360   static void insertTwoBlocksAfter(VPBlockBase *IfTrue, VPBlockBase *IfFalse,
2361                                    VPValue *Condition, VPBlockBase *BlockPtr) {
2362     assert(IfTrue->getSuccessors().empty() &&
2363            "Can't insert IfTrue with successors.");
2364     assert(IfFalse->getSuccessors().empty() &&
2365            "Can't insert IfFalse with successors.");
2366     BlockPtr->setTwoSuccessors(IfTrue, IfFalse, Condition);
2367     IfTrue->setPredecessors({BlockPtr});
2368     IfFalse->setPredecessors({BlockPtr});
2369     IfTrue->setParent(BlockPtr->getParent());
2370     IfFalse->setParent(BlockPtr->getParent());
2371   }
2372 
2373   /// Connect VPBlockBases \p From and \p To bi-directionally. Append \p To to
2374   /// the successors of \p From and \p From to the predecessors of \p To. Both
2375   /// VPBlockBases must have the same parent, which can be null. Both
2376   /// VPBlockBases can be already connected to other VPBlockBases.
2377   static void connectBlocks(VPBlockBase *From, VPBlockBase *To) {
2378     assert((From->getParent() == To->getParent()) &&
2379            "Can't connect two block with different parents");
2380     assert(From->getNumSuccessors() < 2 &&
2381            "Blocks can't have more than two successors.");
2382     From->appendSuccessor(To);
2383     To->appendPredecessor(From);
2384   }
2385 
2386   /// Disconnect VPBlockBases \p From and \p To bi-directionally. Remove \p To
2387   /// from the successors of \p From and \p From from the predecessors of \p To.
2388   static void disconnectBlocks(VPBlockBase *From, VPBlockBase *To) {
2389     assert(To && "Successor to disconnect is null.");
2390     From->removeSuccessor(To);
2391     To->removePredecessor(From);
2392   }
2393 
2394   /// Returns true if the edge \p FromBlock -> \p ToBlock is a back-edge.
2395   static bool isBackEdge(const VPBlockBase *FromBlock,
2396                          const VPBlockBase *ToBlock, const VPLoopInfo *VPLI) {
2397     assert(FromBlock->getParent() == ToBlock->getParent() &&
2398            FromBlock->getParent() && "Must be in same region");
2399     const VPLoop *FromLoop = VPLI->getLoopFor(FromBlock);
2400     const VPLoop *ToLoop = VPLI->getLoopFor(ToBlock);
2401     if (!FromLoop || !ToLoop || FromLoop != ToLoop)
2402       return false;
2403 
2404     // A back-edge is a branch from the loop latch to its header.
2405     return ToLoop->isLoopLatch(FromBlock) && ToBlock == ToLoop->getHeader();
2406   }
2407 
2408   /// Returns true if \p Block is a loop latch
2409   static bool blockIsLoopLatch(const VPBlockBase *Block,
2410                                const VPLoopInfo *VPLInfo) {
2411     if (const VPLoop *ParentVPL = VPLInfo->getLoopFor(Block))
2412       return ParentVPL->isLoopLatch(Block);
2413 
2414     return false;
2415   }
2416 
2417   /// Count and return the number of succesors of \p PredBlock excluding any
2418   /// backedges.
2419   static unsigned countSuccessorsNoBE(VPBlockBase *PredBlock,
2420                                       VPLoopInfo *VPLI) {
2421     unsigned Count = 0;
2422     for (VPBlockBase *SuccBlock : PredBlock->getSuccessors()) {
2423       if (!VPBlockUtils::isBackEdge(PredBlock, SuccBlock, VPLI))
2424         Count++;
2425     }
2426     return Count;
2427   }
2428 
2429   /// Return an iterator range over \p Range which only includes \p BlockTy
2430   /// blocks. The accesses are casted to \p BlockTy.
2431   template <typename BlockTy, typename T>
2432   static auto blocksOnly(const T &Range) {
2433     // Create BaseTy with correct const-ness based on BlockTy.
2434     using BaseTy =
2435         typename std::conditional<std::is_const<BlockTy>::value,
2436                                   const VPBlockBase, VPBlockBase>::type;
2437 
2438     // We need to first create an iterator range over (const) BlocktTy & instead
2439     // of (const) BlockTy * for filter_range to work properly.
2440     auto Mapped =
2441         map_range(Range, [](BaseTy *Block) -> BaseTy & { return *Block; });
2442     auto Filter = make_filter_range(
2443         Mapped, [](BaseTy &Block) { return isa<BlockTy>(&Block); });
2444     return map_range(Filter, [](BaseTy &Block) -> BlockTy * {
2445       return cast<BlockTy>(&Block);
2446     });
2447   }
2448 };
2449 
/// Owns and provides lookup for VPInstruction-based interleave groups, built
/// from the IR-based interleave groups in an InterleavedAccessInfo.
class VPInterleavedAccessInfo {
  /// Mapping from a VPInstruction to the interleave group it belongs to.
  DenseMap<VPInstruction *, InterleaveGroup<VPInstruction> *>
      InterleaveGroupMap;

  /// Type for mapping of instruction based interleave groups to VPInstruction
  /// interleave groups
  using Old2NewTy = DenseMap<InterleaveGroup<Instruction> *,
                             InterleaveGroup<VPInstruction> *>;

  /// Recursively traverse \p Region and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitRegion(VPRegionBlock *Region, Old2NewTy &Old2New,
                   InterleavedAccessInfo &IAI);
  /// Recursively traverse \p Block and populate VPlan based interleave groups
  /// based on \p IAI.
  void visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
                  InterleavedAccessInfo &IAI);

public:
  /// Build the interleave-group mapping for \p Plan from the IR-based groups
  /// in \p IAI.
  VPInterleavedAccessInfo(VPlan &Plan, InterleavedAccessInfo &IAI);

  /// Destroy the owned interleave groups. Multiple map entries may share one
  /// group, so deduplicate before deleting.
  ~VPInterleavedAccessInfo() {
    SmallPtrSet<InterleaveGroup<VPInstruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<VPInstruction> *
  getInterleaveGroup(VPInstruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }
};
2488 
/// Class that maps (parts of) an existing VPlan to trees of combined
/// VPInstructions.
class VPlanSlp {
  /// How a bundle was (or failed to be) matched: combination failed, matched
  /// as loads, or matched by opcode (see getBest).
  enum class OpMode { Failed, Load, Opcode };

  /// A DenseMapInfo implementation for using SmallVector<VPValue *, 4> as
  /// DenseMap keys.
  struct BundleDenseMapInfo {
    // Sentinel bundle used as the DenseMap empty key.
    static SmallVector<VPValue *, 4> getEmptyKey() {
      return {reinterpret_cast<VPValue *>(-1)};
    }

    // Sentinel bundle used as the DenseMap tombstone key.
    static SmallVector<VPValue *, 4> getTombstoneKey() {
      return {reinterpret_cast<VPValue *>(-2)};
    }

    // Hash a bundle by combining the hashes of all its members.
    static unsigned getHashValue(const SmallVector<VPValue *, 4> &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    }

    static bool isEqual(const SmallVector<VPValue *, 4> &LHS,
                        const SmallVector<VPValue *, 4> &RHS) {
      return LHS == RHS;
    }
  };

  /// Mapping of values in the original VPlan to a combined VPInstruction.
  DenseMap<SmallVector<VPValue *, 4>, VPInstruction *, BundleDenseMapInfo>
      BundleToCombined;

  /// Interleaved access analysis for the VPlan under consideration.
  VPInterleavedAccessInfo &IAI;

  /// Basic block to operate on. For now, only instructions in a single BB are
  /// considered.
  const VPBasicBlock &BB;

  /// Indicates whether we managed to combine all visited instructions or not.
  bool CompletelySLP = true;

  /// Width of the widest combined bundle in bits.
  unsigned WidestBundleBits = 0;

  /// A multi-node operand: the combined instruction plus the operand bundle
  /// that still needs reordering.
  using MultiNodeOpTy =
      typename std::pair<VPInstruction *, SmallVector<VPValue *, 4>>;

  // Input operand bundles for the current multi node. Each multi node operand
  // bundle contains values not matching the multi node's opcode. They will
  // be reordered in reorderMultiNodeOps, once we completed building a
  // multi node.
  SmallVector<MultiNodeOpTy, 4> MultiNodeOps;

  /// Indicates whether we are building a multi node currently.
  bool MultiNodeActive = false;

  /// Check if we can vectorize Operands together.
  bool areVectorizable(ArrayRef<VPValue *> Operands) const;

  /// Add combined instruction \p New for the bundle \p Operands.
  void addCombined(ArrayRef<VPValue *> Operands, VPInstruction *New);

  /// Indicate we hit a bundle we failed to combine. Returns nullptr for now.
  VPInstruction *markFailed();

  /// Reorder operands in the multi node to maximize sequential memory access
  /// and commutative operations.
  SmallVector<MultiNodeOpTy, 4> reorderMultiNodeOps();

  /// Choose the best candidate to use for the lane after \p Last. The set of
  /// candidates to choose from are values with an opcode matching \p Last's
  /// or loads consecutive to \p Last.
  std::pair<OpMode, VPValue *> getBest(OpMode Mode, VPValue *Last,
                                       SmallPtrSetImpl<VPValue *> &Candidates,
                                       VPInterleavedAccessInfo &IAI);

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print bundle \p Values to dbgs().
  void dumpBundle(ArrayRef<VPValue *> Values);
#endif

public:
  VPlanSlp(VPInterleavedAccessInfo &IAI, VPBasicBlock &BB) : IAI(IAI), BB(BB) {}

  ~VPlanSlp() = default;

  /// Tries to build an SLP tree rooted at \p Operands and returns a
  /// VPInstruction combining \p Operands, if they can be combined.
  VPInstruction *buildGraph(ArrayRef<VPValue *> Operands);

  /// Return the width of the widest combined bundle in bits.
  unsigned getWidestBundleBits() const { return WidestBundleBits; }

  /// Return true if all visited instruction can be combined.
  bool isCompletelySLP() const { return CompletelySLP; }
};
2583 } // end namespace llvm
2584 
2585 #endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
2586