1 //===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This is the LLVM vectorization plan. It represents a candidate for
11 /// vectorization, making it possible to plan and optimize how to vectorize a
12 /// given loop before generating LLVM-IR.
13 /// The vectorizer uses vectorization plans to estimate the costs of potential
14 /// candidates and, if deemed profitable, to execute the desired plan,
15 /// generating vector LLVM-IR code.
16 ///
17 //===----------------------------------------------------------------------===//
18 
19 #include "VPlan.h"
20 #include "LoopVectorizationPlanner.h"
21 #include "VPlanCFG.h"
22 #include "VPlanDominatorTree.h"
23 #include "VPlanPatternMatch.h"
24 #include "VPlanTransforms.h"
25 #include "VPlanUtils.h"
26 #include "llvm/ADT/PostOrderIterator.h"
27 #include "llvm/ADT/STLExtras.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/StringExtras.h"
30 #include "llvm/ADT/Twine.h"
31 #include "llvm/Analysis/DomTreeUpdater.h"
32 #include "llvm/Analysis/LoopInfo.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/CFG.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/Type.h"
39 #include "llvm/IR/Value.h"
40 #include "llvm/Support/Casting.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/GraphWriter.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
46 #include "llvm/Transforms/Utils/LoopVersioning.h"
47 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
48 #include <cassert>
49 #include <string>
50 #include <vector>
51 
52 using namespace llvm;
53 using namespace llvm::VPlanPatternMatch;
54 
55 namespace llvm {
56 extern cl::opt<bool> EnableVPlanNativePath;
57 }
58 extern cl::opt<unsigned> ForceTargetInstructionCost;
59 
60 static cl::opt<bool> PrintVPlansInDotFormat(
61     "vplan-print-in-dot-format", cl::Hidden,
62     cl::desc("Use dot format instead of plain text when dumping VPlans"));
63 
64 #define DEBUG_TYPE "vplan"
65 
66 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
67 raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
68   const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
69   VPSlotTracker SlotTracker(
70       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
71   V.print(OS, SlotTracker);
72   return OS;
73 }
74 #endif
75 
76 Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
77                                 const ElementCount &VF) const {
78   switch (LaneKind) {
79   case VPLane::Kind::ScalableLast:
80     // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
81     return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
82                              Builder.getInt32(VF.getKnownMinValue() - Lane));
83   case VPLane::Kind::First:
84     return Builder.getInt32(Lane);
85   }
86   llvm_unreachable("Unknown lane kind");
87 }
88 
89 VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
90     : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
91   if (Def)
92     Def->addDefinedValue(this);
93 }
94 
95 VPValue::~VPValue() {
96   assert(Users.empty() && "trying to delete a VPValue with remaining users");
97   if (Def)
98     Def->removeDefinedValue(this);
99 }
100 
101 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
102 void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
103   if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
104     R->print(OS, "", SlotTracker);
105   else
106     printAsOperand(OS, SlotTracker);
107 }
108 
109 void VPValue::dump() const {
110   const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
111   VPSlotTracker SlotTracker(
112       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
113   print(dbgs(), SlotTracker);
114   dbgs() << "\n";
115 }
116 
117 void VPDef::dump() const {
118   const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
119   VPSlotTracker SlotTracker(
120       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
121   print(dbgs(), "", SlotTracker);
122   dbgs() << "\n";
123 }
124 #endif
125 
126 VPRecipeBase *VPValue::getDefiningRecipe() {
127   return cast_or_null<VPRecipeBase>(Def);
128 }
129 
130 const VPRecipeBase *VPValue::getDefiningRecipe() const {
131   return cast_or_null<VPRecipeBase>(Def);
132 }
133 
134 // Get the top-most entry block of \p Start. This is the entry block of the
135 // containing VPlan. Templated to support both const and non-const blocks.
136 template <typename T> static T *getPlanEntry(T *Start) {
137   T *Next = Start;
138   T *Current = Start;
139   while ((Next = Next->getParent()))
140     Current = Next;
141 
142   SmallSetVector<T *, 8> WorkList;
143   WorkList.insert(Current);
144 
145   for (unsigned i = 0; i < WorkList.size(); i++) {
146     T *Current = WorkList[i];
147     if (Current->getNumPredecessors() == 0)
148       return Current;
149     auto &Predecessors = Current->getPredecessors();
150     WorkList.insert(Predecessors.begin(), Predecessors.end());
151   }
152 
153   llvm_unreachable("VPlan without any entry node without predecessors");
154 }
155 
156 VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }
157 
158 const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }
159 
160 /// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
161 const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
162   const VPBlockBase *Block = this;
163   while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
164     Block = Region->getEntry();
165   return cast<VPBasicBlock>(Block);
166 }
167 
168 VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
169   VPBlockBase *Block = this;
170   while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
171     Block = Region->getEntry();
172   return cast<VPBasicBlock>(Block);
173 }
174 
175 void VPBlockBase::setPlan(VPlan *ParentPlan) {
176   assert(
177       (ParentPlan->getEntry() == this || ParentPlan->getPreheader() == this) &&
178       "Can only set plan on its entry or preheader block.");
179   Plan = ParentPlan;
180 }
181 
182 /// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
183 const VPBasicBlock *VPBlockBase::getExitingBasicBlock() const {
184   const VPBlockBase *Block = this;
185   while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
186     Block = Region->getExiting();
187   return cast<VPBasicBlock>(Block);
188 }
189 
190 VPBasicBlock *VPBlockBase::getExitingBasicBlock() {
191   VPBlockBase *Block = this;
192   while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
193     Block = Region->getExiting();
194   return cast<VPBasicBlock>(Block);
195 }
196 
197 VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
198   if (!Successors.empty() || !Parent)
199     return this;
200   assert(Parent->getExiting() == this &&
201          "Block w/o successors not the exiting block of its parent.");
202   return Parent->getEnclosingBlockWithSuccessors();
203 }
204 
205 VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
206   if (!Predecessors.empty() || !Parent)
207     return this;
208   assert(Parent->getEntry() == this &&
209          "Block w/o predecessors not the entry of its parent.");
210   return Parent->getEnclosingBlockWithPredecessors();
211 }
212 
213 void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
214   for (VPBlockBase *Block : to_vector(vp_depth_first_shallow(Entry)))
215     delete Block;
216 }
217 
218 VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
219   iterator It = begin();
220   while (It != end() && It->isPhi())
221     It++;
222   return It;
223 }
224 
225 VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
226                                    DominatorTree *DT, IRBuilderBase &Builder,
227                                    InnerLoopVectorizer *ILV, VPlan *Plan)
228     : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
229       LVer(nullptr), TypeAnalysis(Plan->getCanonicalIV()->getScalarType()) {}
230 
231 Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
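  // Lookup order for the scalar value of (Def, Instance): live-ins are
  // returned directly; otherwise prefer a cached per-lane scalar, fall back to
  // lane 0 for values that are uniform after vectorization, and finally
  // extract the requested lane from the per-part vector value.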
232   if (Def->isLiveIn())
233     return Def->getLiveInIRValue();
234 
235   if (hasScalarValue(Def, Instance)) {
236     return Data
237         .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
238   }
239   if (!Instance.Lane.isFirstLane() &&
240       vputils::isUniformAfterVectorization(Def) &&
241       hasScalarValue(Def, {Instance.Part, VPLane::getFirstLane()})) {
242     return Data.PerPartScalars[Def][Instance.Part][0];
243   }
244 
245   assert(hasVectorValue(Def, Instance.Part));
246   auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
247   if (!VecPart->getType()->isVectorTy()) {
248     assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
249     return VecPart;
250   }
251   // TODO: Cache created scalar values.
252   Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
253   auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
254   // set(Def, Extract, Instance);
255   return Extract;
256 }
257 
258 Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
259   if (NeedsScalar) {
260     assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
261             !vputils::onlyFirstLaneUsed(Def) ||
262             (hasScalarValue(Def, VPIteration(Part, 0)) &&
263              Data.PerPartScalars[Def][Part].size() == 1)) &&
264            "Trying to access a single scalar per part but has multiple scalars "
265            "per part.");
266     return get(Def, VPIteration(Part, 0));
267   }
268 
269   // If values have been set for this Def, return the one relevant for \p Part.
270   if (hasVectorValue(Def, Part))
271     return Data.PerPartOutput[Def][Part];
272 
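  // Helper to broadcast a scalar V to a vector of VF elements; the splat is
  // hoisted into the vector preheader when V is defined outside the loop
  // regions.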
273   auto GetBroadcastInstrs = [this, Def](Value *V) {
274     bool SafeToHoist = Def->isDefinedOutsideLoopRegions();
275     if (VF.isScalar())
276       return V;
277     // Place the code for broadcasting invariant variables in the new preheader.
278     IRBuilder<>::InsertPointGuard Guard(Builder);
279     if (SafeToHoist) {
280       BasicBlock *LoopVectorPreHeader = CFG.VPBB2IRBB[cast<VPBasicBlock>(
281           Plan->getVectorLoopRegion()->getSinglePredecessor())];
282       if (LoopVectorPreHeader)
283         Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
284     }
285 
287     // Broadcast the scalar into all locations in the vector.
288     Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
289 
290     return Shuf;
291   };
292 
293   if (!hasScalarValue(Def, {Part, 0})) {
294     assert(Def->isLiveIn() && "expected a live-in");
295     if (Part != 0)
296       return get(Def, 0);
297     Value *IRV = Def->getLiveInIRValue();
298     Value *B = GetBroadcastInstrs(IRV);
299     set(Def, B, Part);
300     return B;
301   }
302 
303   Value *ScalarValue = get(Def, {Part, 0});
304   // If we aren't vectorizing, we can just copy the scalar map values over
305   // to the vector map.
306   if (VF.isScalar()) {
307     set(Def, ScalarValue, Part);
308     return ScalarValue;
309   }
310 
311   bool IsUniform = vputils::isUniformAfterVectorization(Def);
312 
313   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
314   // Check if there is a scalar value for the selected lane.
315   if (!hasScalarValue(Def, {Part, LastLane})) {
316     // At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes and
317     // VPExpandSCEVRecipes can also be uniform.
318     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
319             isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe()) ||
320             isa<VPExpandSCEVRecipe>(Def->getDefiningRecipe())) &&
321            "unexpected recipe found to be invariant");
322     IsUniform = true;
323     LastLane = 0;
324   }
325 
326   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
327   // Set the insert point after the last scalarized instruction or after the
328   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
329   // will directly follow the scalar definitions.
330   auto OldIP = Builder.saveIP();
331   auto NewIP =
332       isa<PHINode>(LastInst)
333           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
334           : std::next(BasicBlock::iterator(LastInst));
335   Builder.SetInsertPoint(&*NewIP);
336 
337   // However, if we are vectorizing, we need to construct the vector values.
338   // If the value is known to be uniform after vectorization, we can just
339   // broadcast the scalar value corresponding to lane zero for each unroll
340   // iteration. Otherwise, we construct the vector values using
341   // insertelement instructions. Since the resulting vectors are stored in
342   // State, we will only generate the insertelements once.
343   Value *VectorValue = nullptr;
344   if (IsUniform) {
345     VectorValue = GetBroadcastInstrs(ScalarValue);
346     set(Def, VectorValue, Part);
347   } else {
348     // Initialize packing with insertelements to start from undef.
349     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
350     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
351     set(Def, Undef, Part);
352     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
353       packScalarIntoVectorValue(Def, {Part, Lane});
354     VectorValue = get(Def, Part);
355   }
356   Builder.restoreIP(OldIP);
357   return VectorValue;
358 }
359 
360 BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
361   VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
362   return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
363 }
364 
365 void VPTransformState::addNewMetadata(Instruction *To,
366                                       const Instruction *Orig) {
367   // If the loop was versioned with memchecks, add the corresponding no-alias
368   // metadata.
369   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
370     LVer->annotateInstWithNoAlias(To, Orig);
371 }
372 
373 void VPTransformState::addMetadata(Value *To, Instruction *From) {
374   // No source instruction to transfer metadata from?
375   if (!From)
376     return;
377 
378   if (Instruction *ToI = dyn_cast<Instruction>(To)) {
379     propagateMetadata(ToI, From);
380     addNewMetadata(ToI, From);
381   }
382 }
383 
384 void VPTransformState::setDebugLocFrom(DebugLoc DL) {
385   const DILocation *DIL = DL;
386   // When a FSDiscriminator is enabled, we don't need to add the multiply
387   // factors to the discriminators.
388   if (DIL &&
389       Builder.GetInsertBlock()
390           ->getParent()
391           ->shouldEmitDebugInfoForProfiling() &&
392       !EnableFSDiscriminator) {
393     // FIXME: For scalable vectors, assume vscale=1.
394     unsigned UF = Plan->getUF();
395     auto NewDIL =
396         DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
397     if (NewDIL)
398       Builder.SetCurrentDebugLocation(*NewDIL);
399     else
400       LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
401                         << DIL->getFilename() << " Line: " << DIL->getLine());
402   } else
403     Builder.SetCurrentDebugLocation(DIL);
404 }
405 
406 void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
407                                                  const VPIteration &Instance) {
408   Value *ScalarInst = get(Def, Instance);
409   Value *VectorValue = get(Def, Instance.Part);
410   VectorValue = Builder.CreateInsertElement(
411       VectorValue, ScalarInst, Instance.Lane.getAsRuntimeExpr(Builder, VF));
412   set(Def, VectorValue, Instance.Part);
413 }
414 
415 BasicBlock *
416 VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
417   // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
418   // Pred stands for Predecessor; Prev for Previous - last visited/created.
419   BasicBlock *PrevBB = CFG.PrevBB;
420   BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
421                                          PrevBB->getParent(), CFG.ExitBB);
422   LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
423 
424   // Hook up the new basic block to its predecessors.
425   for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
426     VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
427     auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
428     BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];
429 
430     assert(PredBB && "Predecessor basic-block not found building successor.");
431     auto *PredBBTerminator = PredBB->getTerminator();
432     LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
433 
434     auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
435     if (isa<UnreachableInst>(PredBBTerminator)) {
436       assert(PredVPSuccessors.size() == 1 &&
437              "Predecessor ending w/o branch must have single successor.");
438       DebugLoc DL = PredBBTerminator->getDebugLoc();
439       PredBBTerminator->eraseFromParent();
440       auto *Br = BranchInst::Create(NewBB, PredBB);
441       Br->setDebugLoc(DL);
442     } else if (TermBr && !TermBr->isConditional()) {
443       TermBr->setSuccessor(0, NewBB);
444     } else {
445       // Set each forward successor here when it is created, excluding
446       // backedges. A backward successor is set when the branch is created.
447       unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
448       assert(!TermBr->getSuccessor(idx) &&
449              "Trying to reset an existing successor block.");
450       TermBr->setSuccessor(idx, NewBB);
451     }
452     CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, NewBB}});
453   }
454   return NewBB;
455 }
456 
457 void VPIRBasicBlock::execute(VPTransformState *State) {
458   assert(getHierarchicalSuccessors().size() <= 2 &&
459          "VPIRBasicBlock can have at most two successors at the moment!");
460   State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator());
461   executeRecipes(State, getIRBasicBlock());
462   if (getSingleSuccessor()) {
463     assert(isa<UnreachableInst>(getIRBasicBlock()->getTerminator()));
464     auto *Br = State->Builder.CreateBr(getIRBasicBlock());
465     Br->setOperand(0, nullptr);
466     getIRBasicBlock()->getTerminator()->eraseFromParent();
467   }
468 
469   for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
470     VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
471     BasicBlock *PredBB = State->CFG.VPBB2IRBB[PredVPBB];
472     assert(PredBB && "Predecessor basic-block not found building successor.");
473     LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
474 
475     auto *PredBBTerminator = PredBB->getTerminator();
476     auto *TermBr = cast<BranchInst>(PredBBTerminator);
477     // Set each forward successor here when it is created, excluding
478     // backedges. A backward successor is set when the branch is created.
479     const auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
480     unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
481     assert(!TermBr->getSuccessor(idx) &&
482            "Trying to reset an existing successor block.");
483     TermBr->setSuccessor(idx, IRBB);
484     State->CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, IRBB}});
485   }
486 }
487 
488 void VPBasicBlock::execute(VPTransformState *State) {
489   bool Replica = State->Instance && !State->Instance->isFirstIteration();
490   VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
491   VPBlockBase *SingleHPred = nullptr;
492   BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.
493 
494   auto IsLoopRegion = [](VPBlockBase *BB) {
495     auto *R = dyn_cast<VPRegionBlock>(BB);
496     return R && !R->isReplicator();
497   };
498 
499   // 1. Create an IR basic block.
500   if (PrevVPBB && /* A */
501       !((SingleHPred = getSingleHierarchicalPredecessor()) &&
502         SingleHPred->getExitingBasicBlock() == PrevVPBB &&
503         PrevVPBB->getSingleHierarchicalSuccessor() &&
504         (SingleHPred->getParent() == getEnclosingLoopRegion() &&
505          !IsLoopRegion(SingleHPred))) &&         /* B */
506       !(Replica && getPredecessors().empty())) { /* C */
507     // The last IR basic block is reused, as an optimization, in three cases:
508     // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
509     // B. when the current VPBB has a single (hierarchical) predecessor which
510     //    is PrevVPBB and the latter has a single (hierarchical) successor which
511     //    both are in the same non-replicator region; and
512     // C. when the current VPBB is an entry of a region replica - where PrevVPBB
513     //    is the exiting VPBB of this region from a previous instance, or the
514     //    predecessor of this region.
515 
516     NewBB = createEmptyBasicBlock(State->CFG);
517     State->Builder.SetInsertPoint(NewBB);
518     // Temporarily terminate with unreachable until CFG is rewired.
519     UnreachableInst *Terminator = State->Builder.CreateUnreachable();
520     // Register NewBB in its loop. In innermost loops it's the same for all
521     // BBs.
522     if (State->CurrentVectorLoop)
523       State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
524     State->Builder.SetInsertPoint(Terminator);
525     State->CFG.PrevBB = NewBB;
526   }
527 
528   // 2. Fill the IR basic block with IR instructions.
529   executeRecipes(State, NewBB);
530 }
531 
532 void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
533   for (VPRecipeBase &R : Recipes) {
534     for (auto *Def : R.definedValues())
535       Def->replaceAllUsesWith(NewValue);
536 
537     for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
538       R.setOperand(I, NewValue);
539   }
540 }
541 
542 void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
543   LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
544                     << " in BB:" << BB->getName() << '\n');
545 
546   State->CFG.VPBB2IRBB[this] = BB;
547   State->CFG.PrevVPBB = this;
548 
549   for (VPRecipeBase &Recipe : Recipes)
550     Recipe.execute(*State);
551 
552   LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
553 }
554 
555 VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
556   assert((SplitAt == end() || SplitAt->getParent() == this) &&
557          "can only split at a position in the same block");
558 
559   SmallVector<VPBlockBase *, 2> Succs(successors());
560   // First, disconnect the current block from its successors.
561   for (VPBlockBase *Succ : Succs)
562     VPBlockUtils::disconnectBlocks(this, Succ);
563 
564   // Create new empty block after the block to split.
565   auto *SplitBlock = new VPBasicBlock(getName() + ".split");
566   VPBlockUtils::insertBlockAfter(SplitBlock, this);
567 
568   // Add successors for block to split to new block.
569   for (VPBlockBase *Succ : Succs)
570     VPBlockUtils::connectBlocks(SplitBlock, Succ);
571 
572   // Finally, move the recipes starting at SplitAt to new block.
573   for (VPRecipeBase &ToMove :
574        make_early_inc_range(make_range(SplitAt, this->end())))
575     ToMove.moveBefore(*SplitBlock, SplitBlock->end());
576 
577   return SplitBlock;
578 }
579 
580 /// Return the enclosing loop region for region \p P. The templated version is
581 /// used to support both const and non-const block arguments.
582 template <typename T> static T *getEnclosingLoopRegionForRegion(T *P) {
583   if (P && P->isReplicator()) {
584     P = P->getParent();
585     assert(!cast<VPRegionBlock>(P)->isReplicator() &&
586            "unexpected nested replicate regions");
587   }
588   return P;
589 }
590 
591 VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
592   return getEnclosingLoopRegionForRegion(getParent());
593 }
594 
595 const VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() const {
596   return getEnclosingLoopRegionForRegion(getParent());
597 }
598 
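/// Returns true if \p VPBB is terminated by a conditional branch recipe, i.e.
/// a VPBranchOnMaskRecipe, BranchOnCond or BranchOnCount. This is the case
/// exactly when the block has multiple successors or is the exiting block of a
/// non-replicate region.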
599 static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
600   if (VPBB->empty()) {
601     assert(
602         VPBB->getNumSuccessors() < 2 &&
603         "block with multiple successors doesn't have a recipe as terminator");
604     return false;
605   }
606 
607   const VPRecipeBase *R = &VPBB->back();
608   bool IsCondBranch = isa<VPBranchOnMaskRecipe>(R) ||
609                       match(R, m_BranchOnCond(m_VPValue())) ||
610                       match(R, m_BranchOnCount(m_VPValue(), m_VPValue()));
611   (void)IsCondBranch;
612 
613   if (VPBB->getNumSuccessors() >= 2 ||
614       (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
615     assert(IsCondBranch && "block with multiple successors not terminated by "
616                            "conditional branch recipe");
617 
618     return true;
619   }
620 
621   assert(
622       !IsCondBranch &&
623       "block with 0 or 1 successors terminated by conditional branch recipe");
624   return false;
625 }
626 
627 VPRecipeBase *VPBasicBlock::getTerminator() {
628   if (hasConditionalTerminator(this))
629     return &back();
630   return nullptr;
631 }
632 
633 const VPRecipeBase *VPBasicBlock::getTerminator() const {
634   if (hasConditionalTerminator(this))
635     return &back();
636   return nullptr;
637 }
638 
639 bool VPBasicBlock::isExiting() const {
640   return getParent() && getParent()->getExitingBasicBlock() == this;
641 }
642 
643 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
644 void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
645   if (getSuccessors().empty()) {
646     O << Indent << "No successors\n";
647   } else {
648     O << Indent << "Successor(s): ";
649     ListSeparator LS;
650     for (auto *Succ : getSuccessors())
651       O << LS << Succ->getName();
652     O << '\n';
653   }
654 }
655 
656 void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
657                          VPSlotTracker &SlotTracker) const {
658   O << Indent << getName() << ":\n";
659 
660   auto RecipeIndent = Indent + "  ";
661   for (const VPRecipeBase &Recipe : *this) {
662     Recipe.print(O, RecipeIndent, SlotTracker);
663     O << '\n';
664   }
665 
666   printSuccessors(O, Indent);
667 }
668 #endif
669 
670 static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry);
671 
672 // Clone the CFG for all nodes reachable from \p Entry, this includes cloning
673 // the blocks and their recipes. Operands of cloned recipes will NOT be updated.
674 // Remapping of operands must be done separately. Returns a pair with the new
675 // entry and exiting blocks of the cloned region. If \p Entry isn't part of a
676 // region, return nullptr for the exiting block.
677 static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry) {
678   DenseMap<VPBlockBase *, VPBlockBase *> Old2NewVPBlocks;
679   VPBlockBase *Exiting = nullptr;
680   bool InRegion = Entry->getParent();
681   // First, clone blocks reachable from Entry.
682   for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
683     VPBlockBase *NewBB = BB->clone();
684     Old2NewVPBlocks[BB] = NewBB;
685     if (InRegion && BB->getNumSuccessors() == 0) {
686       assert(!Exiting && "Multiple exiting blocks?");
687       Exiting = BB;
688     }
689   }
690   assert((!InRegion || Exiting) && "regions must have a single exiting block");
691 
692   // Second, update the predecessors & successors of the cloned blocks.
693   for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
694     VPBlockBase *NewBB = Old2NewVPBlocks[BB];
695     SmallVector<VPBlockBase *> NewPreds;
696     for (VPBlockBase *Pred : BB->getPredecessors()) {
697       NewPreds.push_back(Old2NewVPBlocks[Pred]);
698     }
699     NewBB->setPredecessors(NewPreds);
700     SmallVector<VPBlockBase *> NewSuccs;
701     for (VPBlockBase *Succ : BB->successors()) {
702       NewSuccs.push_back(Old2NewVPBlocks[Succ]);
703     }
704     NewBB->setSuccessors(NewSuccs);
705   }
706 
707 #if !defined(NDEBUG)
708   // Verify that the order of predecessors and successors matches in the cloned
709   // version.
710   for (const auto &[OldBB, NewBB] :
711        zip(vp_depth_first_shallow(Entry),
712            vp_depth_first_shallow(Old2NewVPBlocks[Entry]))) {
713     for (const auto &[OldPred, NewPred] :
714          zip(OldBB->getPredecessors(), NewBB->getPredecessors()))
715       assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors");
716 
717     for (const auto &[OldSucc, NewSucc] :
718          zip(OldBB->successors(), NewBB->successors()))
719       assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors");
720   }
721 #endif
722 
723   return std::make_pair(Old2NewVPBlocks[Entry],
724                         Exiting ? Old2NewVPBlocks[Exiting] : nullptr);
725 }
726 
727 VPRegionBlock *VPRegionBlock::clone() {
728   const auto &[NewEntry, NewExiting] = cloneFrom(getEntry());
729   auto *NewRegion =
730       new VPRegionBlock(NewEntry, NewExiting, getName(), isReplicator());
731   for (VPBlockBase *Block : vp_depth_first_shallow(NewEntry))
732     Block->setParent(NewRegion);
733   return NewRegion;
734 }
735 
736 void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
737   for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
738     // Drop all references in VPBasicBlocks and replace all uses with
739     // DummyValue.
740     Block->dropAllReferences(NewValue);
741 }
742 
743 void VPRegionBlock::execute(VPTransformState *State) {
744   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
745       RPOT(Entry);
746 
747   if (!isReplicator()) {
748     // Create and register the new vector loop.
749     Loop *PrevLoop = State->CurrentVectorLoop;
750     State->CurrentVectorLoop = State->LI->AllocateLoop();
751     BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
752     Loop *ParentLoop = State->LI->getLoopFor(VectorPH);
753 
754     // Insert the new loop into the loop nest and register the new basic blocks
755     // before calling any utilities such as SCEV that require valid LoopInfo.
756     if (ParentLoop)
757       ParentLoop->addChildLoop(State->CurrentVectorLoop);
758     else
759       State->LI->addTopLevelLoop(State->CurrentVectorLoop);
760 
761     // Visit the VPBlocks connected to "this", starting from it.
762     for (VPBlockBase *Block : RPOT) {
763       LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
764       Block->execute(State);
765     }
766 
767     State->CurrentVectorLoop = PrevLoop;
768     return;
769   }
770 
771   assert(!State->Instance && "Replicating a Region with non-null instance.");
772 
773   // Enter replicating mode.
774   State->Instance = VPIteration(0, 0);
775 
776   for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
777     State->Instance->Part = Part;
778     assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
779     for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
780          ++Lane) {
781       State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
782       // Visit the VPBlocks connected to \p this, starting from it.
783       for (VPBlockBase *Block : RPOT) {
784         LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
785         Block->execute(State);
786       }
787     }
788   }
789 
790   // Exit replicating mode.
791   State->Instance.reset();
792 }
793 
794 InstructionCost VPBasicBlock::cost(ElementCount VF, VPCostContext &Ctx) {
795   InstructionCost Cost = 0;
796   for (VPRecipeBase &R : Recipes)
797     Cost += R.cost(VF, Ctx);
798   return Cost;
799 }
800 
801 InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
802   if (!isReplicator()) {
803     InstructionCost Cost = 0;
804     for (VPBlockBase *Block : vp_depth_first_shallow(getEntry()))
805       Cost += Block->cost(VF, Ctx);
806     InstructionCost BackedgeCost =
807         ForceTargetInstructionCost.getNumOccurrences()
808             ? InstructionCost(ForceTargetInstructionCost.getNumOccurrences())
809             : Ctx.TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
810     LLVM_DEBUG(dbgs() << "Cost of " << BackedgeCost << " for VF " << VF
811                       << ": vector loop backedge\n");
812     Cost += BackedgeCost;
813     return Cost;
814   }
815 
816   // Compute the cost of a replicate region. Replicating isn't supported for
817   // scalable vectors, return an invalid cost for them.
818   // TODO: Discard scalable VPlans with replicate recipes earlier after
819   // construction.
820   if (VF.isScalable())
821     return InstructionCost::getInvalid();
822 
823   // First compute the cost of the conditionally executed recipes, then
824   // account for the branching cost, except if the mask is a header mask or a
825   // uniform condition.
826   using namespace llvm::VPlanPatternMatch;
827   VPBasicBlock *Then = cast<VPBasicBlock>(getEntry()->getSuccessors()[0]);
828   InstructionCost ThenCost = Then->cost(VF, Ctx);
829 
830   // For the scalar case, we may not always execute the original predicated
831   // block. Thus, scale the block's cost by the probability of executing it.
832   if (VF.isScalar())
833     return ThenCost / getReciprocalPredBlockProb();
834 
835   return ThenCost;
836 }
837 
838 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
839 void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
840                           VPSlotTracker &SlotTracker) const {
841   O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
842   auto NewIndent = Indent + "  ";
843   for (auto *BlockBase : vp_depth_first_shallow(Entry)) {
844     O << '\n';
845     BlockBase->print(O, NewIndent, SlotTracker);
846   }
847   O << Indent << "}\n";
848 
849   printSuccessors(O, Indent);
850 }
851 #endif
852 
853 VPlan::~VPlan() {
854   for (auto &KV : LiveOuts)
855     delete KV.second;
856   LiveOuts.clear();
857 
858   if (Entry) {
859     VPValue DummyValue;
860     for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
861       Block->dropAllReferences(&DummyValue);
862 
863     VPBlockBase::deleteCFG(Entry);
864 
865     Preheader->dropAllReferences(&DummyValue);
866     delete Preheader;
867   }
868   for (VPValue *VPV : VPLiveInsToFree)
869     delete VPV;
870   if (BackedgeTakenCount)
871     delete BackedgeTakenCount;
872 }
873 
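/// Create a VPIRBasicBlock wrapping \p BB, with a VPIRInstruction wrapping
/// each instruction of \p BB except its terminator.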
874 static VPIRBasicBlock *createVPIRBasicBlockFor(BasicBlock *BB) {
875   auto *VPIRBB = new VPIRBasicBlock(BB);
876   for (Instruction &I :
877        make_range(BB->begin(), BB->getTerminator()->getIterator()))
878     VPIRBB->appendRecipe(new VPIRInstruction(I));
879   return VPIRBB;
880 }
881 
882 VPlanPtr VPlan::createInitialVPlan(Type *InductionTy,
883                                    PredicatedScalarEvolution &PSE,
884                                    bool RequiresScalarEpilogueCheck,
885                                    bool TailFolded, Loop *TheLoop) {
886   VPIRBasicBlock *Entry = createVPIRBasicBlockFor(TheLoop->getLoopPreheader());
887   VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph");
888   auto Plan = std::make_unique<VPlan>(Entry, VecPreheader);
889 
890   // Create SCEV and VPValue for the trip count.
891   const SCEV *BackedgeTakenCount = PSE.getBackedgeTakenCount();
892   assert(!isa<SCEVCouldNotCompute>(BackedgeTakenCount) && "Invalid loop count");
893   ScalarEvolution &SE = *PSE.getSE();
894   const SCEV *TripCount =
895       SE.getTripCountFromExitCount(BackedgeTakenCount, InductionTy, TheLoop);
896   Plan->TripCount =
897       vputils::getOrCreateVPValueForSCEVExpr(*Plan, TripCount, SE);
898 
899   // Create VPRegionBlock, with empty header and latch blocks, to be filled
900   // during processing later.
901   VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
902   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
903   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
904   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop",
905                                       false /*isReplicator*/);
906 
907   VPBlockUtils::insertBlockAfter(TopRegion, VecPreheader);
908   VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
909   VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);
910 
911   VPBasicBlock *ScalarPH = new VPBasicBlock("scalar.ph");
912   if (!RequiresScalarEpilogueCheck) {
913     VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
914     return Plan;
915   }
916 
917   // If needed, add a check in the middle block to see if we have completed
918   // all of the iterations in the first vector loop.  Three cases:
919   // 1) If (N - N%VF) == N, then we *don't* need to run the remainder.
920   //    Thus if tail is to be folded, we know we don't need to run the
921   //    remainder and we can set the condition to true.
922   // 2) If we require a scalar epilogue, there is no conditional branch as
923   //    we unconditionally branch to the scalar preheader.  Do nothing.
924   // 3) Otherwise, construct a runtime check.
925   BasicBlock *IRExitBlock = TheLoop->getUniqueExitBlock();
926   auto *VPExitBlock = createVPIRBasicBlockFor(IRExitBlock);
927   // The connection order corresponds to the operands of the conditional branch.
928   VPBlockUtils::insertBlockAfter(VPExitBlock, MiddleVPBB);
929   VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
930 
931   auto *ScalarLatchTerm = TheLoop->getLoopLatch()->getTerminator();
932   // Here we use the same DebugLoc as the scalar loop latch terminator instead
933   // of the corresponding compare because they may have ended up with
934   // different line numbers and we want to avoid awkward line stepping while
935   // debugging. Eg. if the compare has got a line number inside the loop.
936   VPBuilder Builder(MiddleVPBB);
937   VPValue *Cmp =
938       TailFolded
939           ? Plan->getOrAddLiveIn(ConstantInt::getTrue(
940                 IntegerType::getInt1Ty(TripCount->getType()->getContext())))
941           : Builder.createICmp(CmpInst::ICMP_EQ, Plan->getTripCount(),
942                                &Plan->getVectorTripCount(),
943                                ScalarLatchTerm->getDebugLoc(), "cmp.n");
944   Builder.createNaryOp(VPInstruction::BranchOnCond, {Cmp},
945                        ScalarLatchTerm->getDebugLoc());
946   return Plan;
947 }
948 
949 void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
950                              Value *CanonicalIVStartValue,
951                              VPTransformState &State) {
952   Type *TCTy = TripCountV->getType();
953   // Check if the backedge taken count is needed, and if so build it.
954   if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
955     IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
956     auto *TCMO = Builder.CreateSub(TripCountV, ConstantInt::get(TCTy, 1),
957                                    "trip.count.minus.1");
958     BackedgeTakenCount->setUnderlyingValue(TCMO);
959   }
960 
961   VectorTripCount.setUnderlyingValue(VectorTripCountV);
962 
963   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
964   // FIXME: Model VF * UF computation completely in VPlan.
965   assert(VFxUF.getNumUsers() && "VFxUF expected to always have users");
966   if (VF.getNumUsers()) {
967     Value *RuntimeVF = getRuntimeVF(Builder, TCTy, State.VF);
968     VF.setUnderlyingValue(RuntimeVF);
969     VFxUF.setUnderlyingValue(
970         State.UF > 1
971             ? Builder.CreateMul(RuntimeVF, ConstantInt::get(TCTy, State.UF))
972             : RuntimeVF);
973   } else {
974     VFxUF.setUnderlyingValue(
975         createStepForVF(Builder, TCTy, State.VF, State.UF));
976   }
977 
978   // When vectorizing the epilogue loop, the canonical induction start value
979   // needs to be changed from zero to the value after the main vector loop.
980   // FIXME: Improve modeling for canonical IV start values in the epilogue loop.
981   if (CanonicalIVStartValue) {
982     VPValue *VPV = getOrAddLiveIn(CanonicalIVStartValue);
983     auto *IV = getCanonicalIV();
984     assert(all_of(IV->users(),
985                   [](const VPUser *U) {
986                     return isa<VPScalarIVStepsRecipe>(U) ||
987                            isa<VPScalarCastRecipe>(U) ||
988                            isa<VPDerivedIVRecipe>(U) ||
989                            cast<VPInstruction>(U)->getOpcode() ==
990                                Instruction::Add;
991                   }) &&
992            "the canonical IV should only be used by its increment or "
993            "ScalarIVSteps when resetting the start value");
994     IV->setOperand(0, VPV);
995   }
996 }
997 
998 /// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
999 /// VPBB are moved to the end of the newly created VPIRBasicBlock. VPBB must
1000 /// have a single predecessor, which is rewired to the new VPIRBasicBlock. All
1001 /// successors of VPBB, if any, are rewired to the new VPIRBasicBlock.
1002 static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) {
1003   VPIRBasicBlock *IRVPBB = createVPIRBasicBlockFor(IRBB);
1004   for (auto &R : make_early_inc_range(*VPBB)) {
1005     assert(!R.isPhi() && "Tried to move phi recipe to end of block");
1006     R.moveBefore(*IRVPBB, IRVPBB->end());
1007   }
1008   VPBlockBase *PredVPBB = VPBB->getSinglePredecessor();
1009   VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
1010   VPBlockUtils::connectBlocks(PredVPBB, IRVPBB);
1011   for (auto *Succ : to_vector(VPBB->getSuccessors())) {
1012     VPBlockUtils::connectBlocks(IRVPBB, Succ);
1013     VPBlockUtils::disconnectBlocks(VPBB, Succ);
1014   }
1015   delete VPBB;
1016 }
1017 
1018 /// Generate the code inside the preheader and body of the vectorized loop.
1019 /// Assumes a single pre-header basic-block was created for this. Introduce
1020 /// additional basic-blocks as needed, and fill them all.
1021 void VPlan::execute(VPTransformState *State) {
1022   // Set UF to 1, as the unrollByUF VPlan transform already explicitly unrolled
1023   // the VPlan.
1024   // TODO: Remove State::UF and all uses.
1025   State->UF = 1;
1026   // Initialize CFG state.
1027   State->CFG.PrevVPBB = nullptr;
1028   State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
1029   BasicBlock *VectorPreHeader = State->CFG.PrevBB;
1030   State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());
1031 
1032   // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
1033   cast<BranchInst>(VectorPreHeader->getTerminator())->setSuccessor(0, nullptr);
1034   State->CFG.DTU.applyUpdates(
1035       {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});
1036 
1037   // Replace regular VPBB's for the middle and scalar preheader blocks with
1038   // VPIRBasicBlocks wrapping their IR blocks. The IR blocks are created during
1039   // skeleton creation, so we can only create the VPIRBasicBlocks now during
1040   // VPlan execution rather than earlier during VPlan construction.
1041   BasicBlock *MiddleBB = State->CFG.ExitBB;
1042   VPBasicBlock *MiddleVPBB =
1043       cast<VPBasicBlock>(getVectorLoopRegion()->getSingleSuccessor());
1044   // Find the VPBB for the scalar preheader, relying on the current structure
1045   // when creating the middle block and its successors: if there's a single
1046   // successor, it must be the scalar preheader. Otherwise, the second
1047   // successor is the scalar preheader.
1048   BasicBlock *ScalarPh = MiddleBB->getSingleSuccessor();
1049   auto &MiddleSuccs = MiddleVPBB->getSuccessors();
1050   assert((MiddleSuccs.size() == 1 || MiddleSuccs.size() == 2) &&
1051          "middle block has unexpected successors");
1052   VPBasicBlock *ScalarPhVPBB = cast<VPBasicBlock>(
1053       MiddleSuccs.size() == 1 ? MiddleSuccs[0] : MiddleSuccs[1]);
1054   assert(!isa<VPIRBasicBlock>(ScalarPhVPBB) &&
1055          "scalar preheader cannot be wrapped already");
1056   replaceVPBBWithIRVPBB(ScalarPhVPBB, ScalarPh);
1057   replaceVPBBWithIRVPBB(MiddleVPBB, MiddleBB);
1058 
1059   // Disconnect the middle block from its single successor (the scalar loop
1060   // header) in both the CFG and DT. The branch will be recreated during VPlan
1061   // execution.
1062   auto *BrInst = new UnreachableInst(MiddleBB->getContext());
1063   BrInst->insertBefore(MiddleBB->getTerminator());
1064   MiddleBB->getTerminator()->eraseFromParent();
1065   State->CFG.DTU.applyUpdates({{DominatorTree::Delete, MiddleBB, ScalarPh}});
1066 
1067   // Generate code in the loop pre-header and body.
1068   for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
1069     Block->execute(State);
1070 
1071   VPBasicBlock *LatchVPBB = getVectorLoopRegion()->getExitingBasicBlock();
1072   BasicBlock *VectorLatchBB = State->CFG.VPBB2IRBB[LatchVPBB];
1073 
1074   // Fix the latch value of canonical, reduction and first-order recurrences
1075   // phis in the vector loop.
1076   VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
1077   for (VPRecipeBase &R : Header->phis()) {
1078     // Skip phi-like recipes that generate their backedge values themselves.
1079     if (isa<VPWidenPHIRecipe>(&R))
1080       continue;
1081 
1082     if (isa<VPWidenPointerInductionRecipe>(&R) ||
1083         isa<VPWidenIntOrFpInductionRecipe>(&R)) {
1084       PHINode *Phi = nullptr;
1085       if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
1086         Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
1087       } else {
1088         auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
1089         assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
1090                "recipe generating only scalars should have been replaced");
1091         auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
1092         Phi = cast<PHINode>(GEP->getPointerOperand());
1093       }
1094 
1095       Phi->setIncomingBlock(1, VectorLatchBB);
1096 
1097       // Move the last step to the end of the latch block. This ensures
1098       // consistent placement of all induction updates.
1099       Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
1100       Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
1101 
1102       // Use the steps for the last part as backedge value for the induction.
1103       if (auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&R))
1104         Inc->setOperand(0, State->get(IV->getLastUnrolledPartOperand(), 0));
1105       continue;
1106     }
1107 
1108     auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
1109     // For canonical IV, first-order recurrences and in-order reduction phis,
1110     // only a single part is generated, which provides the last part from the
1111     // previous iteration. For non-ordered reductions all UF parts are
1112     // generated.
1113     bool SinglePartNeeded =
1114         isa<VPCanonicalIVPHIRecipe>(PhiR) ||
1115         isa<VPFirstOrderRecurrencePHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
1116         (isa<VPReductionPHIRecipe>(PhiR) &&
1117          cast<VPReductionPHIRecipe>(PhiR)->isOrdered());
1118     bool NeedsScalar =
1119         isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
1120         (isa<VPReductionPHIRecipe>(PhiR) &&
1121          cast<VPReductionPHIRecipe>(PhiR)->isInLoop());
1122     unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;
1123 
1124     for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
1125       Value *Phi = State->get(PhiR, Part, NeedsScalar);
1126       Value *Val =
1127           State->get(PhiR->getBackedgeValue(),
1128                      SinglePartNeeded ? State->UF - 1 : Part, NeedsScalar);
1129       cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
1130     }
1131   }
1132 
1133   State->CFG.DTU.flush();
1134   assert(State->CFG.DTU.getDomTree().verify(
1135              DominatorTree::VerificationLevel::Fast) &&
1136          "DT not preserved correctly");
1137 }
1138 
1139 InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
1140   // For now only return the cost of the vector loop region, ignoring any other
1141   // blocks, like the preheader or middle blocks.
1142   return getVectorLoopRegion()->cost(VF, Ctx);
1143 }
1144 
1145 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1146 void VPlan::printLiveIns(raw_ostream &O) const {
1147   VPSlotTracker SlotTracker(this);
1148 
1149   if (VF.getNumUsers() > 0) {
1150     O << "\nLive-in ";
1151     VF.printAsOperand(O, SlotTracker);
1152     O << " = VF";
1153   }
1154 
1155   if (VFxUF.getNumUsers() > 0) {
1156     O << "\nLive-in ";
1157     VFxUF.printAsOperand(O, SlotTracker);
1158     O << " = VF * UF";
1159   }
1160 
1161   if (VectorTripCount.getNumUsers() > 0) {
1162     O << "\nLive-in ";
1163     VectorTripCount.printAsOperand(O, SlotTracker);
1164     O << " = vector-trip-count";
1165   }
1166 
1167   if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
1168     O << "\nLive-in ";
1169     BackedgeTakenCount->printAsOperand(O, SlotTracker);
1170     O << " = backedge-taken count";
1171   }
1172 
1173   O << "\n";
1174   if (TripCount->isLiveIn())
1175     O << "Live-in ";
1176   TripCount->printAsOperand(O, SlotTracker);
1177   O << " = original trip-count";
1178   O << "\n";
1179 }
1180 
1181 LLVM_DUMP_METHOD
1182 void VPlan::print(raw_ostream &O) const {
1183   VPSlotTracker SlotTracker(this);
1184 
1185   O << "VPlan '" << getName() << "' {";
1186 
1187   printLiveIns(O);
1188 
1189   if (!getPreheader()->empty()) {
1190     O << "\n";
1191     getPreheader()->print(O, "", SlotTracker);
1192   }
1193 
1194   for (const VPBlockBase *Block : vp_depth_first_shallow(getEntry())) {
1195     O << '\n';
1196     Block->print(O, "", SlotTracker);
1197   }
1198 
1199   if (!LiveOuts.empty())
1200     O << "\n";
1201   for (const auto &KV : LiveOuts) {
1202     KV.second->print(O, SlotTracker);
1203   }
1204 
1205   O << "}\n";
1206 }
1207 
1208 std::string VPlan::getName() const {
1209   std::string Out;
1210   raw_string_ostream RSO(Out);
1211   RSO << Name << " for ";
1212   if (!VFs.empty()) {
1213     RSO << "VF={" << VFs[0];
1214     for (ElementCount VF : drop_begin(VFs))
1215       RSO << "," << VF;
1216     RSO << "},";
1217   }
1218 
1219   if (UFs.empty()) {
1220     RSO << "UF>=1";
1221   } else {
1222     RSO << "UF={" << UFs[0];
1223     for (unsigned UF : drop_begin(UFs))
1224       RSO << "," << UF;
1225     RSO << "}";
1226   }
1227 
1228   return Out;
1229 }
1230 
1231 LLVM_DUMP_METHOD
1232 void VPlan::printDOT(raw_ostream &O) const {
1233   VPlanPrinter Printer(O, *this);
1234   Printer.dump();
1235 }
1236 
1237 LLVM_DUMP_METHOD
1238 void VPlan::dump() const { print(dbgs()); }
1239 #endif
1240 
1241 void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
1242   assert(LiveOuts.count(PN) == 0 && "an exit value for PN already exists");
1243   LiveOuts.insert({PN, new VPLiveOut(PN, V)});
1244 }
1245 
1246 static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
1247                           DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
1248   // Update the operands of all cloned recipes starting at NewEntry. This
1249   // traverses all reachable blocks. This is done in two steps, to handle cycles
1250   // in PHI recipes.
1251   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1252       OldDeepRPOT(Entry);
1253   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1254       NewDeepRPOT(NewEntry);
1255   // First, collect all mappings from old to new VPValues defined by cloned
1256   // recipes.
1257   for (const auto &[OldBB, NewBB] :
1258        zip(VPBlockUtils::blocksOnly<VPBasicBlock>(OldDeepRPOT),
1259            VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT))) {
1260     assert(OldBB->getRecipeList().size() == NewBB->getRecipeList().size() &&
1261            "blocks must have the same number of recipes");
1262     for (const auto &[OldR, NewR] : zip(*OldBB, *NewBB)) {
1263       assert(OldR.getNumOperands() == NewR.getNumOperands() &&
1264              "recipes must have the same number of operands");
1265       assert(OldR.getNumDefinedValues() == NewR.getNumDefinedValues() &&
1266              "recipes must define the same number of operands");
1267       for (const auto &[OldV, NewV] :
1268            zip(OldR.definedValues(), NewR.definedValues()))
1269         Old2NewVPValues[OldV] = NewV;
1270     }
1271   }
1272 
1273   // Update all operands to use cloned VPValues.
1274   for (VPBasicBlock *NewBB :
1275        VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT)) {
1276     for (VPRecipeBase &NewR : *NewBB)
1277       for (unsigned I = 0, E = NewR.getNumOperands(); I != E; ++I) {
1278         VPValue *NewOp = Old2NewVPValues.lookup(NewR.getOperand(I));
1279         NewR.setOperand(I, NewOp);
1280       }
1281   }
1282 }
1283 
1284 VPlan *VPlan::duplicate() {
1285   // Clone blocks.
1286   VPBasicBlock *NewPreheader = Preheader->clone();
1287   const auto &[NewEntry, __] = cloneFrom(Entry);
1288 
1289   // Create VPlan, clone live-ins and remap operands in the cloned blocks.
1290   auto *NewPlan = new VPlan(NewPreheader, cast<VPBasicBlock>(NewEntry));
1291   DenseMap<VPValue *, VPValue *> Old2NewVPValues;
1292   for (VPValue *OldLiveIn : VPLiveInsToFree) {
1293     Old2NewVPValues[OldLiveIn] =
1294         NewPlan->getOrAddLiveIn(OldLiveIn->getLiveInIRValue());
1295   }
1296   Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
1297   Old2NewVPValues[&VF] = &NewPlan->VF;
1298   Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
1299   if (BackedgeTakenCount) {
1300     NewPlan->BackedgeTakenCount = new VPValue();
1301     Old2NewVPValues[BackedgeTakenCount] = NewPlan->BackedgeTakenCount;
1302   }
1303   assert(TripCount && "trip count must be set");
1304   if (TripCount->isLiveIn())
1305     Old2NewVPValues[TripCount] =
1306         NewPlan->getOrAddLiveIn(TripCount->getLiveInIRValue());
1307   // else NewTripCount will be created and inserted into Old2NewVPValues when
1308   // TripCount is cloned. In any case NewPlan->TripCount is updated below.
1309 
1310   remapOperands(Preheader, NewPreheader, Old2NewVPValues);
1311   remapOperands(Entry, NewEntry, Old2NewVPValues);
1312 
1313   // Clone live-outs.
1314   for (const auto &[_, LO] : LiveOuts)
1315     NewPlan->addLiveOut(LO->getPhi(), Old2NewVPValues[LO->getOperand(0)]);
1316 
1317   // Initialize remaining fields of cloned VPlan.
1318   NewPlan->VFs = VFs;
1319   NewPlan->UFs = UFs;
1320   // TODO: Adjust names.
1321   NewPlan->Name = Name;
1322   assert(Old2NewVPValues.contains(TripCount) &&
1323          "TripCount must have been added to Old2NewVPValues");
1324   NewPlan->TripCount = Old2NewVPValues[TripCount];
1325   return NewPlan;
1326 }
1327 
1328 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1329 
1330 Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
1331   return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
1332          Twine(getOrCreateBID(Block));
1333 }
1334 
1335 Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
1336   const std::string &Name = Block->getName();
1337   if (!Name.empty())
1338     return Name;
1339   return "VPB" + Twine(getOrCreateBID(Block));
1340 }
1341 
1342 void VPlanPrinter::dump() {
1343   Depth = 1;
1344   bumpIndent(0);
1345   OS << "digraph VPlan {\n";
1346   OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
1347   if (!Plan.getName().empty())
1348     OS << "\\n" << DOT::EscapeString(Plan.getName());
1349 
1350   {
1351     // Print live-ins.
1352     std::string Str;
1353     raw_string_ostream SS(Str);
1354     Plan.printLiveIns(SS);
1355     SmallVector<StringRef, 0> Lines;
1356     StringRef(Str).rtrim('\n').split(Lines, "\n");
1357     for (auto Line : Lines)
1358       OS << DOT::EscapeString(Line.str()) << "\\n";
1359   }
1360 
1361   OS << "\"]\n";
1362   OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
1363   OS << "edge [fontname=Courier, fontsize=30]\n";
1364   OS << "compound=true\n";
1365 
1366   dumpBlock(Plan.getPreheader());
1367 
1368   for (const VPBlockBase *Block : vp_depth_first_shallow(Plan.getEntry()))
1369     dumpBlock(Block);
1370 
1371   OS << "}\n";
1372 }
1373 
1374 void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
1375   if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
1376     dumpBasicBlock(BasicBlock);
1377   else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1378     dumpRegion(Region);
1379   else
1380     llvm_unreachable("Unsupported kind of VPBlock.");
1381 }
1382 
1383 void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
1384                             bool Hidden, const Twine &Label) {
1385   // Due to "dot" constraints, an edge between two regions is printed as an edge
1386   // between the exiting basic block and the entry basic block of those regions.
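  // For example (illustrative), an edge leaving region cluster_N1 into basic
  // block N4 on the true branch may be printed as:
  //   N2 -> N4 [ label="T" ltail=cluster_N1]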
1387   const VPBlockBase *Tail = From->getExitingBasicBlock();
1388   const VPBlockBase *Head = To->getEntryBasicBlock();
1389   OS << Indent << getUID(Tail) << " -> " << getUID(Head);
1390   OS << " [ label=\"" << Label << '\"';
1391   if (Tail != From)
1392     OS << " ltail=" << getUID(From);
1393   if (Head != To)
1394     OS << " lhead=" << getUID(To);
1395   if (Hidden)
1396     OS << "; splines=none";
1397   OS << "]\n";
1398 }
1399 
1400 void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
1401   auto &Successors = Block->getSuccessors();
1402   if (Successors.size() == 1)
1403     drawEdge(Block, Successors.front(), false, "");
1404   else if (Successors.size() == 2) {
1405     drawEdge(Block, Successors.front(), false, "T");
1406     drawEdge(Block, Successors.back(), false, "F");
1407   } else {
1408     unsigned SuccessorNumber = 0;
1409     for (auto *Successor : Successors)
1410       drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
1411   }
1412 }
1413 
1414 void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
1415   // Implement the dot-formatted dump by performing a plain-text dump into
1416   // temporary storage, followed by some post-processing.
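  // The emitted node looks roughly like this (illustrative recipe text):
  //   N2 [label =
  //     "vector.body:\l" +
  //     "  WIDEN ir<%add> = add ir<%a>, ir<%b>\l"
  //   ]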
1417   OS << Indent << getUID(BasicBlock) << " [label =\n";
1418   bumpIndent(1);
1419   std::string Str;
1420   raw_string_ostream SS(Str);
1421   // Use no indentation as we need to wrap the lines into quotes ourselves.
1422   BasicBlock->print(SS, "", SlotTracker);
1423 
1424   // We need to process each line of the output separately, so split the
1425   // single-string plain-text dump into lines.
1426   SmallVector<StringRef, 0> Lines;
1427   StringRef(Str).rtrim('\n').split(Lines, "\n");
1428 
1429   auto EmitLine = [&](StringRef Line, StringRef Suffix) {
1430     OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
1431   };
1432 
1433   // Don't need the "+" after the last line.
1434   for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
1435     EmitLine(Line, " +\n");
1436   EmitLine(Lines.back(), "\n");
1437 
1438   bumpIndent(-1);
1439   OS << Indent << "]\n";
1440 
1441   dumpEdges(BasicBlock);
1442 }
1443 
1444 void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
1445   OS << Indent << "subgraph " << getUID(Region) << " {\n";
1446   bumpIndent(1);
1447   OS << Indent << "fontname=Courier\n"
1448      << Indent << "label=\""
1449      << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
1450      << DOT::EscapeString(Region->getName()) << "\"\n";
1451   // Dump the blocks of the region.
1452   assert(Region->getEntry() && "Region contains no inner blocks.");
1453   for (const VPBlockBase *Block : vp_depth_first_shallow(Region->getEntry()))
1454     dumpBlock(Block);
1455   bumpIndent(-1);
1456   OS << Indent << "}\n";
1457   dumpEdges(Region);
1458 }
1459 
1460 void VPlanIngredient::print(raw_ostream &O) const {
1461   if (auto *Inst = dyn_cast<Instruction>(V)) {
1462     if (!Inst->getType()->isVoidTy()) {
1463       Inst->printAsOperand(O, false);
1464       O << " = ";
1465     }
1466     O << Inst->getOpcodeName() << " ";
1467     unsigned E = Inst->getNumOperands();
1468     if (E > 0) {
1469       Inst->getOperand(0)->printAsOperand(O, false);
1470       for (unsigned I = 1; I < E; ++I)
1471         Inst->getOperand(I)->printAsOperand(O << ", ", false);
1472     }
1473   } else // !Inst
1474     V->printAsOperand(O, false);
1475 }
1476 
1477 #endif
1478 
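// A VPValue is defined outside all loop regions if it has no defining recipe
// (i.e. it is a live-in) or if the block containing its defining recipe is not
// enclosed in any loop region.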
1479 bool VPValue::isDefinedOutsideLoopRegions() const {
1480   return !hasDefiningRecipe() ||
1481          !getDefiningRecipe()->getParent()->getEnclosingLoopRegion();
1482 }
1483 
1484 void VPValue::replaceAllUsesWith(VPValue *New) {
1485   replaceUsesWithIf(New, [](VPUser &, unsigned) { return true; });
1486 }
1487 
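// Illustrative use (OldV and NewV stand for arbitrary existing VPValues):
//   OldV->replaceUsesWithIf(
//       NewV, [](VPUser &U, unsigned Idx) { return Idx == 0; });
// replaces OldV by NewV only where OldV appears as a user's first operand.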
1488 void VPValue::replaceUsesWithIf(
1489     VPValue *New,
1490     llvm::function_ref<bool(VPUser &U, unsigned Idx)> ShouldReplace) {
1491   // Note that this early exit is required for correctness; the implementation
1492   // below relies on the number of users for this VPValue to decrease, which
1493   // isn't the case if this == New.
1494   if (this == New)
1495     return;
1496 
1497   for (unsigned J = 0; J < getNumUsers();) {
1498     VPUser *User = Users[J];
1499     bool RemovedUser = false;
1500     for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) {
1501       if (User->getOperand(I) != this || !ShouldReplace(*User, I))
1502         continue;
1503 
1504       RemovedUser = true;
1505       User->setOperand(I, New);
1506     }
1507     // If a user got removed after updating the current user, the next user to
1508     // update will be moved to the current position, so we only need to
1509     // increment the index if the number of users did not change.
1510     if (!RemovedUser)
1511       J++;
1512   }
1513 }
1514 
1515 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1516 void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
1517   OS << Tracker.getOrCreateName(this);
1518 }
1519 
1520 void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
1521   interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
1522     Op->printAsOperand(O, SlotTracker);
1523   });
1524 }
1525 #endif
1526 
1527 void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
1528                                           Old2NewTy &Old2New,
1529                                           InterleavedAccessInfo &IAI) {
1530   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
1531       RPOT(Region->getEntry());
1532   for (VPBlockBase *Base : RPOT) {
1533     visitBlock(Base, Old2New, IAI);
1534   }
1535 }
1536 
1537 void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
1538                                          InterleavedAccessInfo &IAI) {
1539   if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
1540     for (VPRecipeBase &VPI : *VPBB) {
1541       if (isa<VPWidenPHIRecipe>(&VPI))
1542         continue;
1543       assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
1544       auto *VPInst = cast<VPInstruction>(&VPI);
1545 
1546       auto *Inst = dyn_cast_or_null<Instruction>(VPInst->getUnderlyingValue());
1547       if (!Inst)
1548         continue;
1549       auto *IG = IAI.getInterleaveGroup(Inst);
1550       if (!IG)
1551         continue;
1552 
1553       auto NewIGIter = Old2New.find(IG);
1554       if (NewIGIter == Old2New.end())
1555         Old2New[IG] = new InterleaveGroup<VPInstruction>(
1556             IG->getFactor(), IG->isReverse(), IG->getAlign());
1557 
1558       if (Inst == IG->getInsertPos())
1559         Old2New[IG]->setInsertPos(VPInst);
1560 
1561       InterleaveGroupMap[VPInst] = Old2New[IG];
1562       InterleaveGroupMap[VPInst]->insertMember(
1563           VPInst, IG->getIndex(Inst),
1564           Align(IG->isReverse() ? (-1) * int(IG->getFactor())
1565                                 : IG->getFactor()));
1566     }
1567   } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1568     visitRegion(Region, Old2New, IAI);
1569   else
1570     llvm_unreachable("Unsupported kind of VPBlock.");
1571 }
1572 
1573 VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
1574                                                  InterleavedAccessInfo &IAI) {
1575   Old2NewTy Old2New;
1576   visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
1577 }
1578 
1579 void VPSlotTracker::assignName(const VPValue *V) {
1580   assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
1581   auto *UV = V->getUnderlyingValue();
1582   if (!UV) {
1583     VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
1584     NextSlot++;
1585     return;
1586   }
1587 
1588   // Use the name of the underlying Value, wrapped in "ir<>", and versioned by
1589   // appending ".Number" if the same name is assigned to multiple VPValues.
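  // For example (illustrative), three VPValues wrapping the same IR value %x
  // are named ir<%x>, ir<%x>.1 and ir<%x>.2, in order of assignment.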
1590   std::string Name;
1591   raw_string_ostream S(Name);
1592   UV->printAsOperand(S, false);
1593   assert(!Name.empty() && "Name cannot be empty.");
1594   std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();
1595 
1596   // First assign the base name for V.
1597   const auto &[A, _] = VPValue2Name.insert({V, BaseName});
1598   // Integer or FP constants with different types will result in the same string
1599   // because their types are stripped when printing.
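  // (For example, live-in constants i32 7 and i64 7 both print as ir<7>; such
  // constants keep the plain base name and are not versioned.)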
1600   if (V->isLiveIn() && isa<ConstantInt, ConstantFP>(UV))
1601     return;
1602 
1603   // If the base name is already used by C > 0 other VPValues, increase the
1604   // version counter C and append it to the name assigned to V.
1605   const auto &[C, UseInserted] = BaseName2Version.insert({BaseName, 0});
1606   if (!UseInserted) {
1607     C->second++;
1608     A->second = (BaseName + Twine(".") + Twine(C->second)).str();
1609   }
1610 }
1611 
1612 void VPSlotTracker::assignNames(const VPlan &Plan) {
1613   if (Plan.VF.getNumUsers() > 0)
1614     assignName(&Plan.VF);
1615   if (Plan.VFxUF.getNumUsers() > 0)
1616     assignName(&Plan.VFxUF);
1617   assignName(&Plan.VectorTripCount);
1618   if (Plan.BackedgeTakenCount)
1619     assignName(Plan.BackedgeTakenCount);
1620   for (VPValue *LI : Plan.VPLiveInsToFree)
1621     assignName(LI);
1622   assignNames(Plan.getPreheader());
1623 
1624   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<const VPBlockBase *>>
1625       RPOT(VPBlockDeepTraversalWrapper<const VPBlockBase *>(Plan.getEntry()));
1626   for (const VPBasicBlock *VPBB :
1627        VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
1628     assignNames(VPBB);
1629 }
1630 
1631 void VPSlotTracker::assignNames(const VPBasicBlock *VPBB) {
1632   for (const VPRecipeBase &Recipe : *VPBB)
1633     for (VPValue *Def : Recipe.definedValues())
1634       assignName(Def);
1635 }
1636 
1637 std::string VPSlotTracker::getOrCreateName(const VPValue *V) const {
1638   std::string Name = VPValue2Name.lookup(V);
1639   if (!Name.empty())
1640     return Name;
1641 
1642   // If no name was assigned, then either no VPlan was provided when creating
1643   // the slot tracker, or the value is not reachable from the provided VPlan.
1644   // This can happen, e.g., when printing a recipe that has not yet been
1645   // inserted into a VPlan from within a debugger.
1646   // TODO: Update VPSlotTracker constructor to assign names to recipes &
1647   // VPValues not associated with a VPlan, instead of constructing names ad-hoc
1648   // here.
1649   const VPRecipeBase *DefR = V->getDefiningRecipe();
1650   (void)DefR;
1651   assert((!DefR || !DefR->getParent() || !DefR->getParent()->getPlan()) &&
1652          "VPValue defined by a recipe in a VPlan?");
1653 
1654   // Use the underlying value's name, if there is one.
1655   if (auto *UV = V->getUnderlyingValue()) {
1656     std::string Name;
1657     raw_string_ostream S(Name);
1658     UV->printAsOperand(S, false);
1659     return (Twine("ir<") + Name + ">").str();
1660   }
1661 
1662   return "<badref>";
1663 }
1664 
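// Evaluate Predicate at Range.Start, then clamp Range.End to the first
// power-of-two VF (doubling from Range.Start) for which the predicate's answer
// differs, so that all VFs remaining in Range lead to the same decision.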
1665 bool LoopVectorizationPlanner::getDecisionAndClampRange(
1666     const std::function<bool(ElementCount)> &Predicate, VFRange &Range) {
1667   assert(!Range.isEmpty() && "Trying to test an empty VF range.");
1668   bool PredicateAtRangeStart = Predicate(Range.Start);
1669 
1670   for (ElementCount TmpVF : VFRange(Range.Start * 2, Range.End))
1671     if (Predicate(TmpVF) != PredicateAtRangeStart) {
1672       Range.End = TmpVF;
1673       break;
1674     }
1675 
1676   return PredicateAtRangeStart;
1677 }
1678 
1679 /// Build VPlans for the full range of feasible VFs = {\p MinVF, 2 * \p MinVF,
1680 /// 4 * \p MinVF, ..., \p MaxVF} by repeatedly building a VPlan for a sub-range
1681 /// of VFs starting at a given VF and extending it as much as possible. Each
1682 /// vectorization decision can potentially shorten this sub-range during
1683 /// buildVPlan().
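/// For example (illustrative), with MinVF = 2 and MaxVF = 16 this may build a
/// single VPlan covering {2, 4, 8, 16}, or two VPlans (say, one for {2, 4} and
/// one for {8, 16}) if a decision taken during buildVPlan() changes at VF = 8
/// and clamps the first sub-range.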
1684 void LoopVectorizationPlanner::buildVPlans(ElementCount MinVF,
1685                                            ElementCount MaxVF) {
1686   auto MaxVFTimes2 = MaxVF * 2;
1687   for (ElementCount VF = MinVF; ElementCount::isKnownLT(VF, MaxVFTimes2);) {
1688     VFRange SubRange = {VF, MaxVFTimes2};
1689     auto Plan = buildVPlan(SubRange);
1690     VPlanTransforms::optimize(*Plan);
1691     VPlans.push_back(std::move(Plan));
1692     VF = SubRange.End;
1693   }
1694 }
1695 
1696 VPlan &LoopVectorizationPlanner::getPlanFor(ElementCount VF) const {
1697   assert(count_if(VPlans,
1698                   [VF](const VPlanPtr &Plan) { return Plan->hasVF(VF); }) ==
1699              1 &&
1700          "Multiple VPlans for VF.");
1701 
1702   for (const VPlanPtr &Plan : VPlans) {
1703     if (Plan->hasVF(VF))
1704       return *Plan;
1705   }
1706   llvm_unreachable("No plan found!");
1707 }
1708 
1709 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1710 void LoopVectorizationPlanner::printPlans(raw_ostream &O) {
1711   if (VPlans.empty()) {
1712     O << "LV: No VPlans built.\n";
1713     return;
1714   }
1715   for (const auto &Plan : VPlans)
1716     if (PrintVPlansInDotFormat)
1717       Plan->printDOT(O);
1718     else
1719       Plan->print(O);
1720 }
1721 #endif
1722