1 //===- VPlan.cpp - Vectorizer Plan ----------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This is the LLVM vectorization plan. It represents a candidate for
11 /// vectorization, making it possible to plan and optimize how to vectorize a
12 /// given loop before generating LLVM-IR.
13 /// The vectorizer uses vectorization plans to estimate the costs of potential
14 /// candidates and, if deemed profitable, to execute the desired plan,
15 /// generating vector LLVM-IR code.
16 ///
17 //===----------------------------------------------------------------------===//
18 
19 #include "VPlan.h"
20 #include "LoopVectorizationPlanner.h"
21 #include "VPlanCFG.h"
22 #include "VPlanDominatorTree.h"
23 #include "VPlanPatternMatch.h"
24 #include "llvm/ADT/PostOrderIterator.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/Analysis/DomTreeUpdater.h"
30 #include "llvm/Analysis/LoopInfo.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/CFG.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/Value.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/GenericDomTreeConstruction.h"
42 #include "llvm/Support/GraphWriter.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/LoopVersioning.h"
46 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
47 #include <cassert>
48 #include <string>
49 #include <vector>
50 
51 using namespace llvm;
52 using namespace llvm::VPlanPatternMatch;
53 
54 namespace llvm {
55 extern cl::opt<bool> EnableVPlanNativePath;
56 }
57 
58 #define DEBUG_TYPE "vplan"
59 
60 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
61 raw_ostream &llvm::operator<<(raw_ostream &OS, const VPValue &V) {
62   const VPInstruction *Instr = dyn_cast<VPInstruction>(&V);
63   VPSlotTracker SlotTracker(
64       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
65   V.print(OS, SlotTracker);
66   return OS;
67 }
68 #endif
69 
70 Value *VPLane::getAsRuntimeExpr(IRBuilderBase &Builder,
71                                 const ElementCount &VF) const {
72   switch (LaneKind) {
73   case VPLane::Kind::ScalableLast:
74     // Lane = RuntimeVF - VF.getKnownMinValue() + Lane
75     return Builder.CreateSub(getRuntimeVF(Builder, Builder.getInt32Ty(), VF),
76                              Builder.getInt32(VF.getKnownMinValue() - Lane));
77   case VPLane::Kind::First:
78     return Builder.getInt32(Lane);
79   }
80   llvm_unreachable("Unknown lane kind");
81 }
82 
83 VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
84     : SubclassID(SC), UnderlyingVal(UV), Def(Def) {
85   if (Def)
86     Def->addDefinedValue(this);
87 }
88 
89 VPValue::~VPValue() {
90   assert(Users.empty() && "trying to delete a VPValue with remaining users");
91   if (Def)
92     Def->removeDefinedValue(this);
93 }
94 
95 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
96 void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
97   if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
98     R->print(OS, "", SlotTracker);
99   else
100     printAsOperand(OS, SlotTracker);
101 }
102 
103 void VPValue::dump() const {
104   const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
105   VPSlotTracker SlotTracker(
106       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
107   print(dbgs(), SlotTracker);
108   dbgs() << "\n";
109 }
110 
111 void VPDef::dump() const {
112   const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this);
113   VPSlotTracker SlotTracker(
114       (Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
115   print(dbgs(), "", SlotTracker);
116   dbgs() << "\n";
117 }
118 #endif
119 
120 VPRecipeBase *VPValue::getDefiningRecipe() {
121   return cast_or_null<VPRecipeBase>(Def);
122 }
123 
124 const VPRecipeBase *VPValue::getDefiningRecipe() const {
125   return cast_or_null<VPRecipeBase>(Def);
126 }
127 
128 // Get the top-most entry block of \p Start. This is the entry block of the
129 // containing VPlan. Templated to support both const and non-const blocks.
130 template <typename T> static T *getPlanEntry(T *Start) {
131   T *Next = Start;
132   T *Current = Start;
133   while ((Next = Next->getParent()))
134     Current = Next;
135 
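  // Current is now the outermost block enclosing Start. From there, search
  // backwards through predecessors for a block with no predecessors; that
  // block is the entry of the containing VPlan.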
136   SmallSetVector<T *, 8> WorkList;
137   WorkList.insert(Current);
138 
139   for (unsigned i = 0; i < WorkList.size(); i++) {
140     T *Current = WorkList[i];
141     if (Current->getNumPredecessors() == 0)
142       return Current;
143     auto &Predecessors = Current->getPredecessors();
144     WorkList.insert(Predecessors.begin(), Predecessors.end());
145   }
146 
147   llvm_unreachable("VPlan must have an entry block without predecessors");
148 }
149 
150 VPlan *VPBlockBase::getPlan() { return getPlanEntry(this)->Plan; }
151 
152 const VPlan *VPBlockBase::getPlan() const { return getPlanEntry(this)->Plan; }
153 
154 /// \return the VPBasicBlock that is the entry of Block, possibly indirectly.
155 const VPBasicBlock *VPBlockBase::getEntryBasicBlock() const {
156   const VPBlockBase *Block = this;
157   while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
158     Block = Region->getEntry();
159   return cast<VPBasicBlock>(Block);
160 }
161 
162 VPBasicBlock *VPBlockBase::getEntryBasicBlock() {
163   VPBlockBase *Block = this;
164   while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
165     Block = Region->getEntry();
166   return cast<VPBasicBlock>(Block);
167 }
168 
169 void VPBlockBase::setPlan(VPlan *ParentPlan) {
170   assert(
171       (ParentPlan->getEntry() == this || ParentPlan->getPreheader() == this) &&
172       "Can only set plan on its entry or preheader block.");
173   Plan = ParentPlan;
174 }
175 
176 /// \return the VPBasicBlock that is the exit of Block, possibly indirectly.
177 const VPBasicBlock *VPBlockBase::getExitingBasicBlock() const {
178   const VPBlockBase *Block = this;
179   while (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
180     Block = Region->getExiting();
181   return cast<VPBasicBlock>(Block);
182 }
183 
184 VPBasicBlock *VPBlockBase::getExitingBasicBlock() {
185   VPBlockBase *Block = this;
186   while (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
187     Block = Region->getExiting();
188   return cast<VPBasicBlock>(Block);
189 }
190 
191 VPBlockBase *VPBlockBase::getEnclosingBlockWithSuccessors() {
192   if (!Successors.empty() || !Parent)
193     return this;
194   assert(Parent->getExiting() == this &&
195          "Block w/o successors not the exiting block of its parent.");
196   return Parent->getEnclosingBlockWithSuccessors();
197 }
198 
199 VPBlockBase *VPBlockBase::getEnclosingBlockWithPredecessors() {
200   if (!Predecessors.empty() || !Parent)
201     return this;
202   assert(Parent->getEntry() == this &&
203          "Block w/o predecessors not the entry of its parent.");
204   return Parent->getEnclosingBlockWithPredecessors();
205 }
206 
207 void VPBlockBase::deleteCFG(VPBlockBase *Entry) {
208   for (VPBlockBase *Block : to_vector(vp_depth_first_shallow(Entry)))
209     delete Block;
210 }
211 
212 VPBasicBlock::iterator VPBasicBlock::getFirstNonPhi() {
213   iterator It = begin();
214   while (It != end() && It->isPhi())
215     It++;
216   return It;
217 }
218 
219 VPTransformState::VPTransformState(ElementCount VF, unsigned UF, LoopInfo *LI,
220                                    DominatorTree *DT, IRBuilderBase &Builder,
221                                    InnerLoopVectorizer *ILV, VPlan *Plan,
222                                    LLVMContext &Ctx)
223     : VF(VF), UF(UF), CFG(DT), LI(LI), Builder(Builder), ILV(ILV), Plan(Plan),
224       LVer(nullptr),
225       TypeAnalysis(Plan->getCanonicalIV()->getScalarType(), Ctx) {}
226 
227 Value *VPTransformState::get(VPValue *Def, const VPIteration &Instance) {
228   if (Def->isLiveIn())
229     return Def->getLiveInIRValue();
230 
231   if (hasScalarValue(Def, Instance)) {
232     return Data
233         .PerPartScalars[Def][Instance.Part][Instance.Lane.mapToCacheIndex(VF)];
234   }
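  // If the value is uniform after vectorization, all lanes produce the same
  // value, so the cached scalar for lane 0 of this part can be reused.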
235   if (!Instance.Lane.isFirstLane() &&
236       vputils::isUniformAfterVectorization(Def) &&
237       hasScalarValue(Def, {Instance.Part, VPLane::getFirstLane()})) {
238     return Data.PerPartScalars[Def][Instance.Part][0];
239   }
240 
241   assert(hasVectorValue(Def, Instance.Part));
242   auto *VecPart = Data.PerPartOutput[Def][Instance.Part];
243   if (!VecPart->getType()->isVectorTy()) {
244     assert(Instance.Lane.isFirstLane() && "cannot get lane > 0 for scalar");
245     return VecPart;
246   }
247   // TODO: Cache created scalar values.
248   Value *Lane = Instance.Lane.getAsRuntimeExpr(Builder, VF);
249   auto *Extract = Builder.CreateExtractElement(VecPart, Lane);
250   // set(Def, Extract, Instance);
251   return Extract;
252 }
253 
254 Value *VPTransformState::get(VPValue *Def, unsigned Part, bool NeedsScalar) {
255   if (NeedsScalar) {
256     assert((VF.isScalar() || Def->isLiveIn() || hasVectorValue(Def, Part) ||
257             (hasScalarValue(Def, VPIteration(Part, 0)) &&
258              Data.PerPartScalars[Def][Part].size() == 1)) &&
259            "Trying to access a single scalar per part but has multiple scalars "
260            "per part.");
261     return get(Def, VPIteration(Part, 0));
262   }
263 
264   // If values have been set for this Def, return the one relevant for \p Part.
265   if (hasVectorValue(Def, Part))
266     return Data.PerPartOutput[Def][Part];
267 
268   auto GetBroadcastInstrs = [this, Def](Value *V) {
269     bool SafeToHoist = Def->isDefinedOutsideVectorRegions();
270     if (VF.isScalar())
271       return V;
272     // Place the code for broadcasting invariant variables in the new preheader.
273     IRBuilder<>::InsertPointGuard Guard(Builder);
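    // Only hoist the broadcast into the preheader if the value is defined
    // outside the vector regions; otherwise it must be materialized at the
    // current insert point.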
274     if (SafeToHoist) {
275       BasicBlock *LoopVectorPreHeader = CFG.VPBB2IRBB[cast<VPBasicBlock>(
276           Plan->getVectorLoopRegion()->getSinglePredecessor())];
277       if (LoopVectorPreHeader)
278         Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
279     }
280 
282     // Broadcast the scalar into all locations in the vector.
283     Value *Shuf = Builder.CreateVectorSplat(VF, V, "broadcast");
284 
285     return Shuf;
286   };
287 
288   if (!hasScalarValue(Def, {Part, 0})) {
289     assert(Def->isLiveIn() && "expected a live-in");
290     if (Part != 0)
291       return get(Def, 0);
292     Value *IRV = Def->getLiveInIRValue();
293     Value *B = GetBroadcastInstrs(IRV);
294     set(Def, B, Part);
295     return B;
296   }
297 
298   Value *ScalarValue = get(Def, {Part, 0});
299   // If we aren't vectorizing, we can just copy the scalar map values over
300   // to the vector map.
301   if (VF.isScalar()) {
302     set(Def, ScalarValue, Part);
303     return ScalarValue;
304   }
305 
306   bool IsUniform = vputils::isUniformAfterVectorization(Def);
307 
308   unsigned LastLane = IsUniform ? 0 : VF.getKnownMinValue() - 1;
309   // Check if there is a scalar value for the selected lane.
310   if (!hasScalarValue(Def, {Part, LastLane})) {
311     // At the moment, VPWidenIntOrFpInductionRecipes, VPScalarIVStepsRecipes and
312     // VPExpandSCEVRecipes can also be uniform.
313     assert((isa<VPWidenIntOrFpInductionRecipe>(Def->getDefiningRecipe()) ||
314             isa<VPScalarIVStepsRecipe>(Def->getDefiningRecipe()) ||
315             isa<VPExpandSCEVRecipe>(Def->getDefiningRecipe())) &&
316            "unexpected recipe found to be invariant");
317     IsUniform = true;
318     LastLane = 0;
319   }
320 
321   auto *LastInst = cast<Instruction>(get(Def, {Part, LastLane}));
322   // Set the insert point after the last scalarized instruction or after the
323   // last PHI, if LastInst is a PHI. This ensures the insertelement sequence
324   // will directly follow the scalar definitions.
325   auto OldIP = Builder.saveIP();
326   auto NewIP =
327       isa<PHINode>(LastInst)
328           ? BasicBlock::iterator(LastInst->getParent()->getFirstNonPHI())
329           : std::next(BasicBlock::iterator(LastInst));
330   Builder.SetInsertPoint(&*NewIP);
331 
332   // However, if we are vectorizing, we need to construct the vector values.
333   // If the value is known to be uniform after vectorization, we can just
334   // broadcast the scalar value corresponding to lane zero for each unroll
335   // iteration. Otherwise, we construct the vector values using
336   // insertelement instructions. Since the resulting vectors are stored in
337   // State, we will only generate the insertelements once.
338   Value *VectorValue = nullptr;
339   if (IsUniform) {
340     VectorValue = GetBroadcastInstrs(ScalarValue);
341     set(Def, VectorValue, Part);
342   } else {
343     // Initialize packing with insertelements to start from undef.
344     assert(!VF.isScalable() && "VF is assumed to be non scalable.");
345     Value *Undef = PoisonValue::get(VectorType::get(LastInst->getType(), VF));
346     set(Def, Undef, Part);
347     for (unsigned Lane = 0; Lane < VF.getKnownMinValue(); ++Lane)
348       packScalarIntoVectorValue(Def, {Part, Lane});
349     VectorValue = get(Def, Part);
350   }
351   Builder.restoreIP(OldIP);
352   return VectorValue;
353 }
354 
355 BasicBlock *VPTransformState::CFGState::getPreheaderBBFor(VPRecipeBase *R) {
356   VPRegionBlock *LoopRegion = R->getParent()->getEnclosingLoopRegion();
357   return VPBB2IRBB[LoopRegion->getPreheaderVPBB()];
358 }
359 
360 void VPTransformState::addNewMetadata(Instruction *To,
361                                       const Instruction *Orig) {
362   // If the loop was versioned with memchecks, add the corresponding no-alias
363   // metadata.
364   if (LVer && (isa<LoadInst>(Orig) || isa<StoreInst>(Orig)))
365     LVer->annotateInstWithNoAlias(To, Orig);
366 }
367 
368 void VPTransformState::addMetadata(Value *To, Instruction *From) {
369   // No source instruction to transfer metadata from?
370   if (!From)
371     return;
372 
373   if (Instruction *ToI = dyn_cast<Instruction>(To)) {
374     propagateMetadata(ToI, From);
375     addNewMetadata(ToI, From);
376   }
377 }
378 
379 void VPTransformState::setDebugLocFrom(DebugLoc DL) {
380   const DILocation *DIL = DL;
381   // When an FSDiscriminator is enabled, we don't need to add the multiply
382   // factors to the discriminators.
383   if (DIL &&
384       Builder.GetInsertBlock()
385           ->getParent()
386           ->shouldEmitDebugInfoForProfiling() &&
387       !EnableFSDiscriminator) {
388     // FIXME: For scalable vectors, assume vscale=1.
389     auto NewDIL =
390         DIL->cloneByMultiplyingDuplicationFactor(UF * VF.getKnownMinValue());
391     if (NewDIL)
392       Builder.SetCurrentDebugLocation(*NewDIL);
393     else
394       LLVM_DEBUG(dbgs() << "Failed to create new discriminator: "
395                         << DIL->getFilename() << " Line: " << DIL->getLine());
396   } else
397     Builder.SetCurrentDebugLocation(DIL);
398 }
399 
400 void VPTransformState::packScalarIntoVectorValue(VPValue *Def,
401                                                  const VPIteration &Instance) {
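  // Insert the scalar generated for this (part, lane) into the cached
  // per-part vector value.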
402   Value *ScalarInst = get(Def, Instance);
403   Value *VectorValue = get(Def, Instance.Part);
404   VectorValue = Builder.CreateInsertElement(
405       VectorValue, ScalarInst, Instance.Lane.getAsRuntimeExpr(Builder, VF));
406   set(Def, VectorValue, Instance.Part);
407 }
408 
409 BasicBlock *
410 VPBasicBlock::createEmptyBasicBlock(VPTransformState::CFGState &CFG) {
411   // BB stands for IR BasicBlocks. VPBB stands for VPlan VPBasicBlocks.
412   // Pred stands for Predecessor. Prev stands for Previous - last visited/created.
413   BasicBlock *PrevBB = CFG.PrevBB;
414   BasicBlock *NewBB = BasicBlock::Create(PrevBB->getContext(), getName(),
415                                          PrevBB->getParent(), CFG.ExitBB);
416   LLVM_DEBUG(dbgs() << "LV: created " << NewBB->getName() << '\n');
417 
418   // Hook up the new basic block to its predecessors.
419   for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
420     VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
421     auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
422     BasicBlock *PredBB = CFG.VPBB2IRBB[PredVPBB];
423 
424     assert(PredBB && "Predecessor basic-block not found building successor.");
425     auto *PredBBTerminator = PredBB->getTerminator();
426     LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
427 
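    // Hook NewBB into PredBB's terminator: replace an unreachable placeholder
    // with an unconditional branch, retarget an existing unconditional
    // branch, or fill in the still-unset successor of a conditional branch.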
428     auto *TermBr = dyn_cast<BranchInst>(PredBBTerminator);
429     if (isa<UnreachableInst>(PredBBTerminator)) {
430       assert(PredVPSuccessors.size() == 1 &&
431              "Predecessor ending w/o branch must have single successor.");
432       DebugLoc DL = PredBBTerminator->getDebugLoc();
433       PredBBTerminator->eraseFromParent();
434       auto *Br = BranchInst::Create(NewBB, PredBB);
435       Br->setDebugLoc(DL);
436     } else if (TermBr && !TermBr->isConditional()) {
437       TermBr->setSuccessor(0, NewBB);
438     } else {
439       // Set each forward successor here when it is created, excluding
440       // backedges. A backward successor is set when the branch is created.
441       unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
442       assert(!TermBr->getSuccessor(idx) &&
443              "Trying to reset an existing successor block.");
444       TermBr->setSuccessor(idx, NewBB);
445     }
446     CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, NewBB}});
447   }
448   return NewBB;
449 }
450 
451 void VPIRBasicBlock::execute(VPTransformState *State) {
452   assert(getHierarchicalSuccessors().size() <= 2 &&
453          "VPIRBasicBlock can have at most two successors at the moment!");
454   State->Builder.SetInsertPoint(getIRBasicBlock()->getTerminator());
455   executeRecipes(State, getIRBasicBlock());
456   if (getSingleSuccessor()) {
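    // Replace the unreachable terminator with a branch whose successor is
    // intentionally left unset; it is filled in when the successor block is
    // executed.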
457     assert(isa<UnreachableInst>(getIRBasicBlock()->getTerminator()));
458     auto *Br = State->Builder.CreateBr(getIRBasicBlock());
459     Br->setOperand(0, nullptr);
460     getIRBasicBlock()->getTerminator()->eraseFromParent();
461   }
462 
463   for (VPBlockBase *PredVPBlock : getHierarchicalPredecessors()) {
464     VPBasicBlock *PredVPBB = PredVPBlock->getExitingBasicBlock();
465     BasicBlock *PredBB = State->CFG.VPBB2IRBB[PredVPBB];
466     assert(PredBB && "Predecessor basic-block not found building successor.");
467     LLVM_DEBUG(dbgs() << "LV: draw edge from " << PredBB->getName() << '\n');
468 
469     auto *PredBBTerminator = PredBB->getTerminator();
470     auto *TermBr = cast<BranchInst>(PredBBTerminator);
471     // Set each forward successor here when it is created, excluding
472     // backedges. A backward successor is set when the branch is created.
473     const auto &PredVPSuccessors = PredVPBB->getHierarchicalSuccessors();
474     unsigned idx = PredVPSuccessors.front() == this ? 0 : 1;
475     assert(!TermBr->getSuccessor(idx) &&
476            "Trying to reset an existing successor block.");
477     TermBr->setSuccessor(idx, IRBB);
478     State->CFG.DTU.applyUpdates({{DominatorTree::Insert, PredBB, IRBB}});
479   }
480 }
481 
482 void VPBasicBlock::execute(VPTransformState *State) {
483   bool Replica = State->Instance && !State->Instance->isFirstIteration();
484   VPBasicBlock *PrevVPBB = State->CFG.PrevVPBB;
485   VPBlockBase *SingleHPred = nullptr;
486   BasicBlock *NewBB = State->CFG.PrevBB; // Reuse it if possible.
487 
488   auto IsLoopRegion = [](VPBlockBase *BB) {
489     auto *R = dyn_cast<VPRegionBlock>(BB);
490     return R && !R->isReplicator();
491   };
492 
493   // 1. Create an IR basic block.
494   if (PrevVPBB && /* A */
495       !((SingleHPred = getSingleHierarchicalPredecessor()) &&
496         SingleHPred->getExitingBasicBlock() == PrevVPBB &&
497         PrevVPBB->getSingleHierarchicalSuccessor() &&
498         (SingleHPred->getParent() == getEnclosingLoopRegion() &&
499          !IsLoopRegion(SingleHPred))) &&         /* B */
500       !(Replica && getPredecessors().empty())) { /* C */
501     // The last IR basic block is reused, as an optimization, in three cases:
502     // A. the first VPBB reuses the loop pre-header BB - when PrevVPBB is null;
503     // B. when the current VPBB has a single (hierarchical) predecessor which
504     //    is PrevVPBB and the latter has a single (hierarchical) successor which
505     //    both are in the same non-replicator region; and
506     // C. when the current VPBB is an entry of a region replica - where PrevVPBB
507     //    is the exiting VPBB of this region from a previous instance, or the
508     //    predecessor of this region.
509 
510     NewBB = createEmptyBasicBlock(State->CFG);
511     State->Builder.SetInsertPoint(NewBB);
512     // Temporarily terminate with unreachable until CFG is rewired.
513     UnreachableInst *Terminator = State->Builder.CreateUnreachable();
514     // Register NewBB in its loop. In innermost loops it's the same for all
515     // BBs.
516     if (State->CurrentVectorLoop)
517       State->CurrentVectorLoop->addBasicBlockToLoop(NewBB, *State->LI);
518     State->Builder.SetInsertPoint(Terminator);
519     State->CFG.PrevBB = NewBB;
520   }
521 
522   // 2. Fill the IR basic block with IR instructions.
523   executeRecipes(State, NewBB);
524 }
525 
526 void VPBasicBlock::dropAllReferences(VPValue *NewValue) {
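  // Redirect all uses of values defined in this block, as well as all recipe
  // operands, to NewValue, detaching the recipes from the def-use graph.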
527   for (VPRecipeBase &R : Recipes) {
528     for (auto *Def : R.definedValues())
529       Def->replaceAllUsesWith(NewValue);
530 
531     for (unsigned I = 0, E = R.getNumOperands(); I != E; I++)
532       R.setOperand(I, NewValue);
533   }
534 }
535 
536 void VPBasicBlock::executeRecipes(VPTransformState *State, BasicBlock *BB) {
537   LLVM_DEBUG(dbgs() << "LV: vectorizing VPBB:" << getName()
538                     << " in BB:" << BB->getName() << '\n');
539 
540   State->CFG.VPBB2IRBB[this] = BB;
541   State->CFG.PrevVPBB = this;
542 
543   for (VPRecipeBase &Recipe : Recipes)
544     Recipe.execute(*State);
545 
546   LLVM_DEBUG(dbgs() << "LV: filled BB:" << *BB);
547 }
548 
549 VPBasicBlock *VPBasicBlock::splitAt(iterator SplitAt) {
550   assert((SplitAt == end() || SplitAt->getParent() == this) &&
551          "can only split at a position in the same block");
552 
553   SmallVector<VPBlockBase *, 2> Succs(successors());
554   // First, disconnect the current block from its successors.
555   for (VPBlockBase *Succ : Succs)
556     VPBlockUtils::disconnectBlocks(this, Succ);
557 
558   // Create new empty block after the block to split.
559   auto *SplitBlock = new VPBasicBlock(getName() + ".split");
560   VPBlockUtils::insertBlockAfter(SplitBlock, this);
561 
562   // Add successors for block to split to new block.
563   for (VPBlockBase *Succ : Succs)
564     VPBlockUtils::connectBlocks(SplitBlock, Succ);
565 
566   // Finally, move the recipes starting at SplitAt to new block.
567   for (VPRecipeBase &ToMove :
568        make_early_inc_range(make_range(SplitAt, this->end())))
569     ToMove.moveBefore(*SplitBlock, SplitBlock->end());
570 
571   return SplitBlock;
572 }
573 
574 VPRegionBlock *VPBasicBlock::getEnclosingLoopRegion() {
575   VPRegionBlock *P = getParent();
576   if (P && P->isReplicator()) {
577     P = P->getParent();
578     assert(!cast<VPRegionBlock>(P)->isReplicator() &&
579            "unexpected nested replicate regions");
580   }
581   return P;
582 }
583 
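/// Return true if \p VPBB is terminated by a conditional branch recipe, i.e.
/// it has two or more successors or is the exiting block of a non-replicate
/// region.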
584 static bool hasConditionalTerminator(const VPBasicBlock *VPBB) {
585   if (VPBB->empty()) {
586     assert(
587         VPBB->getNumSuccessors() < 2 &&
588         "block with multiple successors doesn't have a recipe as terminator");
589     return false;
590   }
591 
592   const VPRecipeBase *R = &VPBB->back();
593   bool IsCondBranch = isa<VPBranchOnMaskRecipe>(R) ||
594                       match(R, m_BranchOnCond(m_VPValue())) ||
595                       match(R, m_BranchOnCount(m_VPValue(), m_VPValue()));
596   (void)IsCondBranch;
597 
598   if (VPBB->getNumSuccessors() >= 2 ||
599       (VPBB->isExiting() && !VPBB->getParent()->isReplicator())) {
600     assert(IsCondBranch && "block with multiple successors not terminated by "
601                            "conditional branch recipe");
602 
603     return true;
604   }
605 
606   assert(
607       !IsCondBranch &&
608       "block with 0 or 1 successors terminated by conditional branch recipe");
609   return false;
610 }
611 
612 VPRecipeBase *VPBasicBlock::getTerminator() {
613   if (hasConditionalTerminator(this))
614     return &back();
615   return nullptr;
616 }
617 
618 const VPRecipeBase *VPBasicBlock::getTerminator() const {
619   if (hasConditionalTerminator(this))
620     return &back();
621   return nullptr;
622 }
623 
624 bool VPBasicBlock::isExiting() const {
625   return getParent() && getParent()->getExitingBasicBlock() == this;
626 }
627 
628 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
629 void VPBlockBase::printSuccessors(raw_ostream &O, const Twine &Indent) const {
630   if (getSuccessors().empty()) {
631     O << Indent << "No successors\n";
632   } else {
633     O << Indent << "Successor(s): ";
634     ListSeparator LS;
635     for (auto *Succ : getSuccessors())
636       O << LS << Succ->getName();
637     O << '\n';
638   }
639 }
640 
641 void VPBasicBlock::print(raw_ostream &O, const Twine &Indent,
642                          VPSlotTracker &SlotTracker) const {
643   O << Indent << getName() << ":\n";
644 
645   auto RecipeIndent = Indent + "  ";
646   for (const VPRecipeBase &Recipe : *this) {
647     Recipe.print(O, RecipeIndent, SlotTracker);
648     O << '\n';
649   }
650 
651   printSuccessors(O, Indent);
652 }
653 #endif
654 
655 static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry);
656 
657 // Clone the CFG for all nodes reachable from \p Entry, this includes cloning
658 // the blocks and their recipes. Operands of cloned recipes will NOT be updated.
659 // Remapping of operands must be done separately. Returns a pair with the new
660 // entry and exiting blocks of the cloned region. If \p Entry isn't part of a
661 // region, return nullptr for the exiting block.
662 static std::pair<VPBlockBase *, VPBlockBase *> cloneFrom(VPBlockBase *Entry) {
663   DenseMap<VPBlockBase *, VPBlockBase *> Old2NewVPBlocks;
664   VPBlockBase *Exiting = nullptr;
665   bool InRegion = Entry->getParent();
666   // First, clone blocks reachable from Entry.
667   for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
668     VPBlockBase *NewBB = BB->clone();
669     Old2NewVPBlocks[BB] = NewBB;
670     if (InRegion && BB->getNumSuccessors() == 0) {
671       assert(!Exiting && "Multiple exiting blocks?");
672       Exiting = BB;
673     }
674   }
675   assert((!InRegion || Exiting) && "regions must have a single exiting block");
676 
677   // Second, update the predecessors & successors of the cloned blocks.
678   for (VPBlockBase *BB : vp_depth_first_shallow(Entry)) {
679     VPBlockBase *NewBB = Old2NewVPBlocks[BB];
680     SmallVector<VPBlockBase *> NewPreds;
681     for (VPBlockBase *Pred : BB->getPredecessors()) {
682       NewPreds.push_back(Old2NewVPBlocks[Pred]);
683     }
684     NewBB->setPredecessors(NewPreds);
685     SmallVector<VPBlockBase *> NewSuccs;
686     for (VPBlockBase *Succ : BB->successors()) {
687       NewSuccs.push_back(Old2NewVPBlocks[Succ]);
688     }
689     NewBB->setSuccessors(NewSuccs);
690   }
691 
692 #if !defined(NDEBUG)
693   // Verify that the order of predecessors and successors matches in the cloned
694   // version.
695   for (const auto &[OldBB, NewBB] :
696        zip(vp_depth_first_shallow(Entry),
697            vp_depth_first_shallow(Old2NewVPBlocks[Entry]))) {
698     for (const auto &[OldPred, NewPred] :
699          zip(OldBB->getPredecessors(), NewBB->getPredecessors()))
700       assert(NewPred == Old2NewVPBlocks[OldPred] && "Different predecessors");
701 
702     for (const auto &[OldSucc, NewSucc] :
703          zip(OldBB->successors(), NewBB->successors()))
704       assert(NewSucc == Old2NewVPBlocks[OldSucc] && "Different successors");
705   }
706 #endif
707 
708   return std::make_pair(Old2NewVPBlocks[Entry],
709                         Exiting ? Old2NewVPBlocks[Exiting] : nullptr);
710 }
711 
712 VPRegionBlock *VPRegionBlock::clone() {
713   const auto &[NewEntry, NewExiting] = cloneFrom(getEntry());
714   auto *NewRegion =
715       new VPRegionBlock(NewEntry, NewExiting, getName(), isReplicator());
716   for (VPBlockBase *Block : vp_depth_first_shallow(NewEntry))
717     Block->setParent(NewRegion);
718   return NewRegion;
719 }
720 
721 void VPRegionBlock::dropAllReferences(VPValue *NewValue) {
722   for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
723     // Drop all references in VPBasicBlocks and replace all uses with
724     // NewValue.
725     Block->dropAllReferences(NewValue);
726 }
727 
728 void VPRegionBlock::execute(VPTransformState *State) {
729   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
730       RPOT(Entry);
731 
732   if (!isReplicator()) {
733     // Create and register the new vector loop.
734     Loop *PrevLoop = State->CurrentVectorLoop;
735     State->CurrentVectorLoop = State->LI->AllocateLoop();
736     BasicBlock *VectorPH = State->CFG.VPBB2IRBB[getPreheaderVPBB()];
737     Loop *ParentLoop = State->LI->getLoopFor(VectorPH);
738 
739     // Insert the new loop into the loop nest and register the new basic blocks
740     // before calling any utilities such as SCEV that require valid LoopInfo.
741     if (ParentLoop)
742       ParentLoop->addChildLoop(State->CurrentVectorLoop);
743     else
744       State->LI->addTopLevelLoop(State->CurrentVectorLoop);
745 
746     // Visit the VPBlocks connected to "this", starting from it.
747     for (VPBlockBase *Block : RPOT) {
748       LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
749       Block->execute(State);
750     }
751 
752     State->CurrentVectorLoop = PrevLoop;
753     return;
754   }
755 
756   assert(!State->Instance && "Replicating a Region with non-null instance.");
757 
758   // Enter replicating mode.
759   State->Instance = VPIteration(0, 0);
760 
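  // Generate the recipes of the replicate region once per part and per lane,
  // with State->Instance selecting the current (part, lane) pair.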
761   for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part) {
762     State->Instance->Part = Part;
763     assert(!State->VF.isScalable() && "VF is assumed to be non scalable.");
764     for (unsigned Lane = 0, VF = State->VF.getKnownMinValue(); Lane < VF;
765          ++Lane) {
766       State->Instance->Lane = VPLane(Lane, VPLane::Kind::First);
767       // Visit the VPBlocks connected to \p this, starting from it.
768       for (VPBlockBase *Block : RPOT) {
769         LLVM_DEBUG(dbgs() << "LV: VPBlock in RPO " << Block->getName() << '\n');
770         Block->execute(State);
771       }
772     }
773   }
774 
775   // Exit replicating mode.
776   State->Instance.reset();
777 }
778 
779 InstructionCost VPBasicBlock::cost(ElementCount VF, VPCostContext &Ctx) {
780   InstructionCost Cost = 0;
781   for (VPRecipeBase &R : Recipes)
782     Cost += R.cost(VF, Ctx);
783   return Cost;
784 }
785 
786 InstructionCost VPRegionBlock::cost(ElementCount VF, VPCostContext &Ctx) {
787   if (!isReplicator()) {
788     InstructionCost Cost = 0;
789     for (VPBlockBase *Block : vp_depth_first_shallow(getEntry()))
790       Cost += Block->cost(VF, Ctx);
791     InstructionCost BackedgeCost =
792         Ctx.TTI.getCFInstrCost(Instruction::Br, TTI::TCK_RecipThroughput);
793     LLVM_DEBUG(dbgs() << "Cost of " << BackedgeCost << " for VF " << VF
794                       << ": vector loop backedge\n");
795     Cost += BackedgeCost;
796     return Cost;
797   }
798 
799   // Compute the cost of a replicate region. Replicating isn't supported for
800   // scalable vectors; return an invalid cost for them.
801   // TODO: Discard scalable VPlans with replicate recipes earlier after
802   // construction.
803   if (VF.isScalable())
804     return InstructionCost::getInvalid();
805 
806   // First compute the cost of the conditionally executed recipes, then
807   // account for the branching cost, except when the mask is a header mask
808   // or a uniform condition.
809   using namespace llvm::VPlanPatternMatch;
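  // In a replicate region, the conditionally executed recipes reside in the
  // first successor of the entry block (the "then" block).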
810   VPBasicBlock *Then = cast<VPBasicBlock>(getEntry()->getSuccessors()[0]);
811   InstructionCost ThenCost = Then->cost(VF, Ctx);
812 
813   // For the scalar case, we may not always execute the original predicated
814   // block. Thus, scale the block's cost by the probability of executing it.
815   if (VF.isScalar())
816     return ThenCost / getReciprocalPredBlockProb();
817 
818   return ThenCost;
819 }
820 
821 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
822 void VPRegionBlock::print(raw_ostream &O, const Twine &Indent,
823                           VPSlotTracker &SlotTracker) const {
824   O << Indent << (isReplicator() ? "<xVFxUF> " : "<x1> ") << getName() << ": {";
825   auto NewIndent = Indent + "  ";
826   for (auto *BlockBase : vp_depth_first_shallow(Entry)) {
827     O << '\n';
828     BlockBase->print(O, NewIndent, SlotTracker);
829   }
830   O << Indent << "}\n";
831 
832   printSuccessors(O, Indent);
833 }
834 #endif
835 
836 VPlan::~VPlan() {
837   for (auto &KV : LiveOuts)
838     delete KV.second;
839   LiveOuts.clear();
840 
841   if (Entry) {
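    // Replace all references to values defined in the plan with a dummy
    // value first, so blocks and recipes can be deleted in any order without
    // leaving dangling uses.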
842     VPValue DummyValue;
843     for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
844       Block->dropAllReferences(&DummyValue);
845 
846     VPBlockBase::deleteCFG(Entry);
847 
848     Preheader->dropAllReferences(&DummyValue);
849     delete Preheader;
850   }
851   for (VPValue *VPV : VPLiveInsToFree)
852     delete VPV;
853   if (BackedgeTakenCount)
854     delete BackedgeTakenCount;
855 }
856 
857 VPlanPtr VPlan::createInitialVPlan(const SCEV *TripCount, ScalarEvolution &SE,
858                                    bool RequiresScalarEpilogueCheck,
859                                    bool TailFolded, Loop *TheLoop) {
860   VPIRBasicBlock *Entry = new VPIRBasicBlock(TheLoop->getLoopPreheader());
861   VPBasicBlock *VecPreheader = new VPBasicBlock("vector.ph");
862   auto Plan = std::make_unique<VPlan>(Entry, VecPreheader);
863   Plan->TripCount =
864       vputils::getOrCreateVPValueForSCEVExpr(*Plan, TripCount, SE);
865   // Create VPRegionBlock, with empty header and latch blocks, to be filled
866   // during processing later.
867   VPBasicBlock *HeaderVPBB = new VPBasicBlock("vector.body");
868   VPBasicBlock *LatchVPBB = new VPBasicBlock("vector.latch");
869   VPBlockUtils::insertBlockAfter(LatchVPBB, HeaderVPBB);
870   auto *TopRegion = new VPRegionBlock(HeaderVPBB, LatchVPBB, "vector loop",
871                                       false /*isReplicator*/);
872 
873   VPBlockUtils::insertBlockAfter(TopRegion, VecPreheader);
874   VPBasicBlock *MiddleVPBB = new VPBasicBlock("middle.block");
875   VPBlockUtils::insertBlockAfter(MiddleVPBB, TopRegion);
876 
877   VPBasicBlock *ScalarPH = new VPBasicBlock("scalar.ph");
878   if (!RequiresScalarEpilogueCheck) {
879     VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
880     return Plan;
881   }
882 
883   // If needed, add a check in the middle block to see if we have completed
884   // all of the iterations in the first vector loop.  Three cases:
885   // 1) If (N - N%VF) == N, then we *don't* need to run the remainder.
886   //    Thus if tail is to be folded, we know we don't need to run the
887   //    remainder and we can set the condition to true.
888   // 2) If we require a scalar epilogue, there is no conditional branch as
889   //    we unconditionally branch to the scalar preheader.  Do nothing.
890   // 3) Otherwise, construct a runtime check.
891   BasicBlock *IRExitBlock = TheLoop->getUniqueExitBlock();
892   auto *VPExitBlock = new VPIRBasicBlock(IRExitBlock);
893   // The connection order corresponds to the operands of the conditional branch.
894   VPBlockUtils::insertBlockAfter(VPExitBlock, MiddleVPBB);
895   VPBlockUtils::connectBlocks(MiddleVPBB, ScalarPH);
896 
897   auto *ScalarLatchTerm = TheLoop->getLoopLatch()->getTerminator();
898   // Here we use the same DebugLoc as the scalar loop latch terminator instead
899   // of the corresponding compare because they may have ended up with
900   // different line numbers and we want to avoid awkward line stepping while
901   // debugging. E.g. if the compare has a line number inside the loop.
902   VPBuilder Builder(MiddleVPBB);
903   VPValue *Cmp =
904       TailFolded
905           ? Plan->getOrAddLiveIn(ConstantInt::getTrue(
906                 IntegerType::getInt1Ty(TripCount->getType()->getContext())))
907           : Builder.createICmp(CmpInst::ICMP_EQ, Plan->getTripCount(),
908                                &Plan->getVectorTripCount(),
909                                ScalarLatchTerm->getDebugLoc(), "cmp.n");
910   Builder.createNaryOp(VPInstruction::BranchOnCond, {Cmp},
911                        ScalarLatchTerm->getDebugLoc());
912   return Plan;
913 }
914 
915 void VPlan::prepareToExecute(Value *TripCountV, Value *VectorTripCountV,
916                              Value *CanonicalIVStartValue,
917                              VPTransformState &State) {
918   // Check if the backedge taken count is needed, and if so build it.
919   if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
920     IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
921     auto *TCMO = Builder.CreateSub(TripCountV,
922                                    ConstantInt::get(TripCountV->getType(), 1),
923                                    "trip.count.minus.1");
924     BackedgeTakenCount->setUnderlyingValue(TCMO);
925   }
926 
927   VectorTripCount.setUnderlyingValue(VectorTripCountV);
928 
929   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
930   // FIXME: Model VF * UF computation completely in VPlan.
931   VFxUF.setUnderlyingValue(
932       createStepForVF(Builder, TripCountV->getType(), State.VF, State.UF));
933 
934   // When vectorizing the epilogue loop, the canonical induction start value
935   // needs to be changed from zero to the value after the main vector loop.
936   // FIXME: Improve modeling for canonical IV start values in the epilogue loop.
937   if (CanonicalIVStartValue) {
938     VPValue *VPV = getOrAddLiveIn(CanonicalIVStartValue);
939     auto *IV = getCanonicalIV();
940     assert(all_of(IV->users(),
941                   [](const VPUser *U) {
942                     return isa<VPScalarIVStepsRecipe>(U) ||
943                            isa<VPScalarCastRecipe>(U) ||
944                            isa<VPDerivedIVRecipe>(U) ||
945                            cast<VPInstruction>(U)->getOpcode() ==
946                                Instruction::Add;
947                   }) &&
948            "the canonical IV should only be used by its increment or "
949            "ScalarIVSteps when resetting the start value");
950     IV->setOperand(0, VPV);
951   }
952 }
953 
954 /// Replace \p VPBB with a VPIRBasicBlock wrapping \p IRBB. All recipes from \p
955 /// VPBB are moved to the newly created VPIRBasicBlock.  VPBB must have a single
956 /// predecessor, which is rewired to the new VPIRBasicBlock. All successors of
957 /// VPBB, if any, are rewired to the new VPIRBasicBlock.
958 static void replaceVPBBWithIRVPBB(VPBasicBlock *VPBB, BasicBlock *IRBB) {
959   VPIRBasicBlock *IRMiddleVPBB = new VPIRBasicBlock(IRBB);
960   for (auto &R : make_early_inc_range(*VPBB))
961     R.moveBefore(*IRMiddleVPBB, IRMiddleVPBB->end());
962   VPBlockBase *PredVPBB = VPBB->getSinglePredecessor();
963   VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
964   VPBlockUtils::connectBlocks(PredVPBB, IRMiddleVPBB);
965   for (auto *Succ : to_vector(VPBB->getSuccessors())) {
966     VPBlockUtils::connectBlocks(IRMiddleVPBB, Succ);
967     VPBlockUtils::disconnectBlocks(VPBB, Succ);
968   }
969   delete VPBB;
970 }
971 
972 /// Generate the code inside the preheader and body of the vectorized loop.
973 /// Assumes a single pre-header basic-block was created for this. Introduce
974 /// additional basic-blocks as needed, and fill them all.
975 void VPlan::execute(VPTransformState *State) {
976   // Initialize CFG state.
977   State->CFG.PrevVPBB = nullptr;
978   State->CFG.ExitBB = State->CFG.PrevBB->getSingleSuccessor();
979   BasicBlock *VectorPreHeader = State->CFG.PrevBB;
980   State->Builder.SetInsertPoint(VectorPreHeader->getTerminator());
981 
982   // Disconnect VectorPreHeader from ExitBB in both the CFG and DT.
983   cast<BranchInst>(VectorPreHeader->getTerminator())->setSuccessor(0, nullptr);
984   State->CFG.DTU.applyUpdates(
985       {{DominatorTree::Delete, VectorPreHeader, State->CFG.ExitBB}});
986 
987   // Replace regular VPBB's for the middle and scalar preheader blocks with
988   // VPIRBasicBlocks wrapping their IR blocks. The IR blocks are created during
989   // skeleton creation, so we can only create the VPIRBasicBlocks now during
990   // VPlan execution rather than earlier during VPlan construction.
991   BasicBlock *MiddleBB = State->CFG.ExitBB;
992   VPBasicBlock *MiddleVPBB =
993       cast<VPBasicBlock>(getVectorLoopRegion()->getSingleSuccessor());
994   // Find the VPBB for the scalar preheader, relying on the current structure
995   // when creating the middle block and its successors: if there's a single
996   // successor, it must be the scalar preheader. Otherwise, the second
997   // successor is the scalar preheader.
998   BasicBlock *ScalarPh = MiddleBB->getSingleSuccessor();
999   auto &MiddleSuccs = MiddleVPBB->getSuccessors();
1000   assert((MiddleSuccs.size() == 1 || MiddleSuccs.size() == 2) &&
1001          "middle block has unexpected successors");
1002   VPBasicBlock *ScalarPhVPBB = cast<VPBasicBlock>(
1003       MiddleSuccs.size() == 1 ? MiddleSuccs[0] : MiddleSuccs[1]);
1004   assert(!isa<VPIRBasicBlock>(ScalarPhVPBB) &&
1005          "scalar preheader cannot be wrapped already");
1006   replaceVPBBWithIRVPBB(ScalarPhVPBB, ScalarPh);
1007   replaceVPBBWithIRVPBB(MiddleVPBB, MiddleBB);
1008 
1009   // Disconnect the middle block from its single successor (the scalar loop
1010   // header) in both the CFG and DT. The branch will be recreated during VPlan
1011   // execution.
1012   auto *BrInst = new UnreachableInst(MiddleBB->getContext());
1013   BrInst->insertBefore(MiddleBB->getTerminator());
1014   MiddleBB->getTerminator()->eraseFromParent();
1015   State->CFG.DTU.applyUpdates({{DominatorTree::Delete, MiddleBB, ScalarPh}});
1016 
1017   // Generate code in the loop pre-header and body.
1018   for (VPBlockBase *Block : vp_depth_first_shallow(Entry))
1019     Block->execute(State);
1020 
1021   VPBasicBlock *LatchVPBB = getVectorLoopRegion()->getExitingBasicBlock();
1022   BasicBlock *VectorLatchBB = State->CFG.VPBB2IRBB[LatchVPBB];
1023 
1024   // Fix the latch value of canonical, reduction and first-order recurrences
1025   // phis in the vector loop.
1026   VPBasicBlock *Header = getVectorLoopRegion()->getEntryBasicBlock();
1027   for (VPRecipeBase &R : Header->phis()) {
1028     // Skip phi-like recipes that generate their backedge values themselves.
1029     if (isa<VPWidenPHIRecipe>(&R))
1030       continue;
1031 
1032     if (isa<VPWidenPointerInductionRecipe>(&R) ||
1033         isa<VPWidenIntOrFpInductionRecipe>(&R)) {
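      // Widened induction recipes create their phi nodes themselves; locate
      // the IR phi so its backedge incoming block and value can be fixed up
      // below.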
1034       PHINode *Phi = nullptr;
1035       if (isa<VPWidenIntOrFpInductionRecipe>(&R)) {
1036         Phi = cast<PHINode>(State->get(R.getVPSingleValue(), 0));
1037       } else {
1038         auto *WidenPhi = cast<VPWidenPointerInductionRecipe>(&R);
1039         assert(!WidenPhi->onlyScalarsGenerated(State->VF.isScalable()) &&
1040                "recipe generating only scalars should have been replaced");
1041         auto *GEP = cast<GetElementPtrInst>(State->get(WidenPhi, 0));
1042         Phi = cast<PHINode>(GEP->getPointerOperand());
1043       }
1044 
1045       Phi->setIncomingBlock(1, VectorLatchBB);
1046 
1047       // Move the last step to the end of the latch block. This ensures
1048       // consistent placement of all induction updates.
1049       Instruction *Inc = cast<Instruction>(Phi->getIncomingValue(1));
1050       Inc->moveBefore(VectorLatchBB->getTerminator()->getPrevNode());
1051       continue;
1052     }
1053 
1054     auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
1055     // For the canonical IV, first-order recurrences and in-order reduction phis,
1056     // only a single part is generated, which provides the last part from the
1057     // previous iteration. For non-ordered reductions all UF parts are
1058     // generated.
1059     bool SinglePartNeeded =
1060         isa<VPCanonicalIVPHIRecipe>(PhiR) ||
1061         isa<VPFirstOrderRecurrencePHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
1062         (isa<VPReductionPHIRecipe>(PhiR) &&
1063          cast<VPReductionPHIRecipe>(PhiR)->isOrdered());
1064     bool NeedsScalar =
1065         isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(PhiR) ||
1066         (isa<VPReductionPHIRecipe>(PhiR) &&
1067          cast<VPReductionPHIRecipe>(PhiR)->isInLoop());
1068     unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF;
1069 
1070     for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) {
1071       Value *Phi = State->get(PhiR, Part, NeedsScalar);
1072       Value *Val =
1073           State->get(PhiR->getBackedgeValue(),
1074                      SinglePartNeeded ? State->UF - 1 : Part, NeedsScalar);
1075       cast<PHINode>(Phi)->addIncoming(Val, VectorLatchBB);
1076     }
1077   }
1078 
1079   State->CFG.DTU.flush();
1080   assert(State->CFG.DTU.getDomTree().verify(
1081              DominatorTree::VerificationLevel::Fast) &&
1082          "DT not preserved correctly");
1083 }
1084 
1085 InstructionCost VPlan::cost(ElementCount VF, VPCostContext &Ctx) {
1086   // For now only return the cost of the vector loop region, ignoring any other
1087   // blocks, like the preheader or middle blocks.
1088   return getVectorLoopRegion()->cost(VF, Ctx);
1089 }
1090 
1091 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1092 void VPlan::printLiveIns(raw_ostream &O) const {
1093   VPSlotTracker SlotTracker(this);
1094 
1095   if (VFxUF.getNumUsers() > 0) {
1096     O << "\nLive-in ";
1097     VFxUF.printAsOperand(O, SlotTracker);
1098     O << " = VF * UF";
1099   }
1100 
1101   if (VectorTripCount.getNumUsers() > 0) {
1102     O << "\nLive-in ";
1103     VectorTripCount.printAsOperand(O, SlotTracker);
1104     O << " = vector-trip-count";
1105   }
1106 
1107   if (BackedgeTakenCount && BackedgeTakenCount->getNumUsers()) {
1108     O << "\nLive-in ";
1109     BackedgeTakenCount->printAsOperand(O, SlotTracker);
1110     O << " = backedge-taken count";
1111   }
1112 
1113   O << "\n";
1114   if (TripCount->isLiveIn())
1115     O << "Live-in ";
1116   TripCount->printAsOperand(O, SlotTracker);
1117   O << " = original trip-count";
1118   O << "\n";
1119 }
1120 
1121 LLVM_DUMP_METHOD
1122 void VPlan::print(raw_ostream &O) const {
1123   VPSlotTracker SlotTracker(this);
1124 
1125   O << "VPlan '" << getName() << "' {";
1126 
1127   printLiveIns(O);
1128 
1129   if (!getPreheader()->empty()) {
1130     O << "\n";
1131     getPreheader()->print(O, "", SlotTracker);
1132   }
1133 
1134   for (const VPBlockBase *Block : vp_depth_first_shallow(getEntry())) {
1135     O << '\n';
1136     Block->print(O, "", SlotTracker);
1137   }
1138 
1139   if (!LiveOuts.empty())
1140     O << "\n";
1141   for (const auto &KV : LiveOuts) {
1142     KV.second->print(O, SlotTracker);
1143   }
1144 
1145   O << "}\n";
1146 }
1147 
1148 std::string VPlan::getName() const {
1149   std::string Out;
1150   raw_string_ostream RSO(Out);
1151   RSO << Name << " for ";
1152   if (!VFs.empty()) {
1153     RSO << "VF={" << VFs[0];
1154     for (ElementCount VF : drop_begin(VFs))
1155       RSO << "," << VF;
1156     RSO << "},";
1157   }
1158 
1159   if (UFs.empty()) {
1160     RSO << "UF>=1";
1161   } else {
1162     RSO << "UF={" << UFs[0];
1163     for (unsigned UF : drop_begin(UFs))
1164       RSO << "," << UF;
1165     RSO << "}";
1166   }
1167 
1168   return Out;
1169 }
1170 
1171 LLVM_DUMP_METHOD
1172 void VPlan::printDOT(raw_ostream &O) const {
1173   VPlanPrinter Printer(O, *this);
1174   Printer.dump();
1175 }
1176 
1177 LLVM_DUMP_METHOD
1178 void VPlan::dump() const { print(dbgs()); }
1179 #endif
1180 
1181 void VPlan::addLiveOut(PHINode *PN, VPValue *V) {
1182   assert(LiveOuts.count(PN) == 0 && "an exit value for PN already exists");
1183   LiveOuts.insert({PN, new VPLiveOut(PN, V)});
1184 }
1185 
1186 static void remapOperands(VPBlockBase *Entry, VPBlockBase *NewEntry,
1187                           DenseMap<VPValue *, VPValue *> &Old2NewVPValues) {
1188   // Update the operands of all cloned recipes starting at NewEntry. This
1189   // traverses all reachable blocks. This is done in two steps, to handle cycles
1190   // in PHI recipes.
1191   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1192       OldDeepRPOT(Entry);
1193   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>>
1194       NewDeepRPOT(NewEntry);
1195   // First, collect all mappings from old to new VPValues defined by cloned
1196   // recipes.
1197   for (const auto &[OldBB, NewBB] :
1198        zip(VPBlockUtils::blocksOnly<VPBasicBlock>(OldDeepRPOT),
1199            VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT))) {
1200     assert(OldBB->getRecipeList().size() == NewBB->getRecipeList().size() &&
1201            "blocks must have the same number of recipes");
1202     for (const auto &[OldR, NewR] : zip(*OldBB, *NewBB)) {
1203       assert(OldR.getNumOperands() == NewR.getNumOperands() &&
1204              "recipes must have the same number of operands");
1205       assert(OldR.getNumDefinedValues() == NewR.getNumDefinedValues() &&
1206              "recipes must define the same number of operands");
1207       for (const auto &[OldV, NewV] :
1208            zip(OldR.definedValues(), NewR.definedValues()))
1209         Old2NewVPValues[OldV] = NewV;
1210     }
1211   }
1212 
1213   // Update all operands to use cloned VPValues.
1214   for (VPBasicBlock *NewBB :
1215        VPBlockUtils::blocksOnly<VPBasicBlock>(NewDeepRPOT)) {
1216     for (VPRecipeBase &NewR : *NewBB)
1217       for (unsigned I = 0, E = NewR.getNumOperands(); I != E; ++I) {
1218         VPValue *NewOp = Old2NewVPValues.lookup(NewR.getOperand(I));
1219         NewR.setOperand(I, NewOp);
1220       }
1221   }
1222 }
1223 
1224 VPlan *VPlan::duplicate() {
1225   // Clone blocks.
1226   VPBasicBlock *NewPreheader = Preheader->clone();
1227   const auto &[NewEntry, __] = cloneFrom(Entry);
1228 
1229   // Create VPlan, clone live-ins and remap operands in the cloned blocks.
1230   auto *NewPlan = new VPlan(NewPreheader, cast<VPBasicBlock>(NewEntry));
1231   DenseMap<VPValue *, VPValue *> Old2NewVPValues;
1232   for (VPValue *OldLiveIn : VPLiveInsToFree) {
1233     Old2NewVPValues[OldLiveIn] =
1234         NewPlan->getOrAddLiveIn(OldLiveIn->getLiveInIRValue());
1235   }
1236   Old2NewVPValues[&VectorTripCount] = &NewPlan->VectorTripCount;
1237   Old2NewVPValues[&VFxUF] = &NewPlan->VFxUF;
1238   if (BackedgeTakenCount) {
1239     NewPlan->BackedgeTakenCount = new VPValue();
1240     Old2NewVPValues[BackedgeTakenCount] = NewPlan->BackedgeTakenCount;
1241   }
1242   assert(TripCount && "trip count must be set");
1243   if (TripCount->isLiveIn())
1244     Old2NewVPValues[TripCount] =
1245         NewPlan->getOrAddLiveIn(TripCount->getLiveInIRValue());
1246   // else NewTripCount will be created and inserted into Old2NewVPValues when
1247   // TripCount is cloned. In any case NewPlan->TripCount is updated below.
1248 
1249   remapOperands(Preheader, NewPreheader, Old2NewVPValues);
1250   remapOperands(Entry, NewEntry, Old2NewVPValues);
1251 
1252   // Clone live-outs.
1253   for (const auto &[_, LO] : LiveOuts)
1254     NewPlan->addLiveOut(LO->getPhi(), Old2NewVPValues[LO->getOperand(0)]);
1255 
1256   // Initialize remaining fields of cloned VPlan.
1257   NewPlan->VFs = VFs;
1258   NewPlan->UFs = UFs;
1259   // TODO: Adjust names.
1260   NewPlan->Name = Name;
1261   assert(Old2NewVPValues.contains(TripCount) &&
1262          "TripCount must have been added to Old2NewVPValues");
1263   NewPlan->TripCount = Old2NewVPValues[TripCount];
1264   return NewPlan;
1265 }
1266 
1267 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1268 
1269 Twine VPlanPrinter::getUID(const VPBlockBase *Block) {
1270   return (isa<VPRegionBlock>(Block) ? "cluster_N" : "N") +
1271          Twine(getOrCreateBID(Block));
1272 }
1273 
1274 Twine VPlanPrinter::getOrCreateName(const VPBlockBase *Block) {
1275   const std::string &Name = Block->getName();
1276   if (!Name.empty())
1277     return Name;
1278   return "VPB" + Twine(getOrCreateBID(Block));
1279 }
1280 
1281 void VPlanPrinter::dump() {
1282   Depth = 1;
1283   bumpIndent(0);
1284   OS << "digraph VPlan {\n";
1285   OS << "graph [labelloc=t, fontsize=30; label=\"Vectorization Plan";
1286   if (!Plan.getName().empty())
1287     OS << "\\n" << DOT::EscapeString(Plan.getName());
1288 
1289   {
1290     // Print live-ins.
1291     std::string Str;
1292     raw_string_ostream SS(Str);
1293     Plan.printLiveIns(SS);
1294     SmallVector<StringRef, 0> Lines;
1295     StringRef(Str).rtrim('\n').split(Lines, "\n");
1296     for (auto Line : Lines)
1297       OS << DOT::EscapeString(Line.str()) << "\\n";
1298   }
1299 
1300   OS << "\"]\n";
1301   OS << "node [shape=rect, fontname=Courier, fontsize=30]\n";
1302   OS << "edge [fontname=Courier, fontsize=30]\n";
1303   OS << "compound=true\n";
1304 
1305   dumpBlock(Plan.getPreheader());
1306 
1307   for (const VPBlockBase *Block : vp_depth_first_shallow(Plan.getEntry()))
1308     dumpBlock(Block);
1309 
1310   OS << "}\n";
1311 }
1312 
1313 void VPlanPrinter::dumpBlock(const VPBlockBase *Block) {
1314   if (const VPBasicBlock *BasicBlock = dyn_cast<VPBasicBlock>(Block))
1315     dumpBasicBlock(BasicBlock);
1316   else if (const VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1317     dumpRegion(Region);
1318   else
1319     llvm_unreachable("Unsupported kind of VPBlock.");
1320 }
1321 
1322 void VPlanPrinter::drawEdge(const VPBlockBase *From, const VPBlockBase *To,
1323                             bool Hidden, const Twine &Label) {
1324   // Due to "dot" we print an edge between two regions as an edge between the
1325   // exiting basic block and the entry basic block of the respective regions.
1326   const VPBlockBase *Tail = From->getExitingBasicBlock();
1327   const VPBlockBase *Head = To->getEntryBasicBlock();
1328   OS << Indent << getUID(Tail) << " -> " << getUID(Head);
1329   OS << " [ label=\"" << Label << '\"';
1330   if (Tail != From)
1331     OS << " ltail=" << getUID(From);
1332   if (Head != To)
1333     OS << " lhead=" << getUID(To);
1334   if (Hidden)
1335     OS << "; splines=none";
1336   OS << "]\n";
1337 }
1338 
1339 void VPlanPrinter::dumpEdges(const VPBlockBase *Block) {
1340   auto &Successors = Block->getSuccessors();
1341   if (Successors.size() == 1)
1342     drawEdge(Block, Successors.front(), false, "");
1343   else if (Successors.size() == 2) {
1344     drawEdge(Block, Successors.front(), false, "T");
1345     drawEdge(Block, Successors.back(), false, "F");
1346   } else {
1347     unsigned SuccessorNumber = 0;
1348     for (auto *Successor : Successors)
1349       drawEdge(Block, Successor, false, Twine(SuccessorNumber++));
1350   }
1351 }
1352 
1353 void VPlanPrinter::dumpBasicBlock(const VPBasicBlock *BasicBlock) {
1354   // Implement the dot-formatted dump by performing a plain-text dump into
1355   // temporary storage followed by some post-processing.
1356   OS << Indent << getUID(BasicBlock) << " [label =\n";
1357   bumpIndent(1);
1358   std::string Str;
1359   raw_string_ostream SS(Str);
1360   // Use no indentation as we need to wrap the lines into quotes ourselves.
1361   BasicBlock->print(SS, "", SlotTracker);
1362 
1363   // We need to process each line of the output separately, so split the
1364   // single-string plain-text dump into individual lines.
1365   SmallVector<StringRef, 0> Lines;
1366   StringRef(Str).rtrim('\n').split(Lines, "\n");
1367 
1368   auto EmitLine = [&](StringRef Line, StringRef Suffix) {
1369     OS << Indent << '"' << DOT::EscapeString(Line.str()) << "\\l\"" << Suffix;
1370   };
1371 
1372   // Don't need the "+" after the last line.
1373   for (auto Line : make_range(Lines.begin(), Lines.end() - 1))
1374     EmitLine(Line, " +\n");
1375   EmitLine(Lines.back(), "\n");
1376 
1377   bumpIndent(-1);
1378   OS << Indent << "]\n";
1379 
1380   dumpEdges(BasicBlock);
1381 }
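
// Illustrative sketch of an emitted node (ID and recipe text hypothetical):
// every plain-text line is quoted, left-justified via "\l", and all but the
// last line get a trailing "+" so "dot" concatenates the strings. Note that
// DOT::EscapeString escapes '<' and '>' in the recipe text:
//
//   N4 [label =
//     "vector.body:\l" +
//     "  EMIT vp\<%2\> = add vp\<%1\>, vp\<%0\>\l"
//   ]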
1382 
1383 void VPlanPrinter::dumpRegion(const VPRegionBlock *Region) {
1384   OS << Indent << "subgraph " << getUID(Region) << " {\n";
1385   bumpIndent(1);
1386   OS << Indent << "fontname=Courier\n"
1387      << Indent << "label=\""
1388      << DOT::EscapeString(Region->isReplicator() ? "<xVFxUF> " : "<x1> ")
1389      << DOT::EscapeString(Region->getName()) << "\"\n";
1390   // Dump the blocks of the region.
1391   assert(Region->getEntry() && "Region contains no inner blocks.");
1392   for (const VPBlockBase *Block : vp_depth_first_shallow(Region->getEntry()))
1393     dumpBlock(Block);
1394   bumpIndent(-1);
1395   OS << Indent << "}\n";
1396   dumpEdges(Region);
1397 }
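
// Illustrative sketch of a region dump (names hypothetical): the region
// becomes a DOT cluster whose label carries the "<x1>"/"<xVFxUF>" replication
// marker, with the inner blocks nested inside it:
//
//   subgraph cluster_N1 {
//     fontname=Courier
//     label="\<x1\> vector loop"
//     ...inner block nodes and their edges...
//   }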
1398 
1399 void VPlanIngredient::print(raw_ostream &O) const {
1400   if (auto *Inst = dyn_cast<Instruction>(V)) {
1401     if (!Inst->getType()->isVoidTy()) {
1402       Inst->printAsOperand(O, false);
1403       O << " = ";
1404     }
1405     O << Inst->getOpcodeName() << " ";
1406     unsigned E = Inst->getNumOperands();
1407     if (E > 0) {
1408       Inst->getOperand(0)->printAsOperand(O, false);
1409       for (unsigned I = 1; I < E; ++I)
1410         Inst->getOperand(I)->printAsOperand(O << ", ", false);
1411     }
1412   } else // !Inst
1413     V->printAsOperand(O, false);
1414 }
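
// For illustration (hypothetical IR): an ingredient wrapping
//   %add = add nsw i32 %a, %b
// prints as "%add = add %a, %b", i.e. opcode and operands without types or
// flags.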
1415 
1416 #endif
1417 
1418 template void DomTreeBuilder::Calculate<VPDominatorTree>(VPDominatorTree &DT);
1419 
1420 void VPValue::replaceAllUsesWith(VPValue *New) {
1421   replaceUsesWithIf(New, [](VPUser &, unsigned) { return true; });
1422 }
1423 
1424 void VPValue::replaceUsesWithIf(
1425     VPValue *New,
1426     llvm::function_ref<bool(VPUser &U, unsigned Idx)> ShouldReplace) {
1427   // Note that this early exit is required for correctness; the implementation
1428   // below relies on the number of users for this VPValue to decrease, which
1429   // isn't the case if this == New.
1430   if (this == New)
1431     return;
1432 
1433   for (unsigned J = 0; J < getNumUsers();) {
1434     VPUser *User = Users[J];
1435     bool RemovedUser = false;
1436     for (unsigned I = 0, E = User->getNumOperands(); I < E; ++I) {
1437       if (User->getOperand(I) != this || !ShouldReplace(*User, I))
1438         continue;
1439 
1440       RemovedUser = true;
1441       User->setOperand(I, New);
1442     }
1443     // Replacing an operand removes the current user from this VPValue's user
1444     // list and shifts the next user into the current position, so we only need
1445     // to increment the index if the number of users did not change.
1446     if (!RemovedUser)
1447       J++;
1448   }
1449 }
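
// Minimal usage sketch (OldV/NewV are hypothetical VPValues): replace OldV
// only where it appears as the first operand of a user, leaving other uses
// untouched:
//
//   OldV->replaceUsesWithIf(
//       NewV, [](VPUser &U, unsigned Idx) { return Idx == 0; });
//
// replaceAllUsesWith above is this with an always-true predicate.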
1450 
1451 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1452 void VPValue::printAsOperand(raw_ostream &OS, VPSlotTracker &Tracker) const {
1453   OS << Tracker.getOrCreateName(this);
1454 }
1455 
1456 void VPUser::printOperands(raw_ostream &O, VPSlotTracker &SlotTracker) const {
1457   interleaveComma(operands(), O, [&O, &SlotTracker](VPValue *Op) {
1458     Op->printAsOperand(O, SlotTracker);
1459   });
1460 }
1461 #endif
1462 
1463 void VPInterleavedAccessInfo::visitRegion(VPRegionBlock *Region,
1464                                           Old2NewTy &Old2New,
1465                                           InterleavedAccessInfo &IAI) {
1466   ReversePostOrderTraversal<VPBlockShallowTraversalWrapper<VPBlockBase *>>
1467       RPOT(Region->getEntry());
1468   for (VPBlockBase *Base : RPOT) {
1469     visitBlock(Base, Old2New, IAI);
1470   }
1471 }
1472 
1473 void VPInterleavedAccessInfo::visitBlock(VPBlockBase *Block, Old2NewTy &Old2New,
1474                                          InterleavedAccessInfo &IAI) {
1475   if (VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(Block)) {
1476     for (VPRecipeBase &VPI : *VPBB) {
1477       if (isa<VPWidenPHIRecipe>(&VPI))
1478         continue;
1479       assert(isa<VPInstruction>(&VPI) && "Can only handle VPInstructions");
1480       auto *VPInst = cast<VPInstruction>(&VPI);
1481 
1482       auto *Inst = dyn_cast_or_null<Instruction>(VPInst->getUnderlyingValue());
1483       if (!Inst)
1484         continue;
1485       auto *IG = IAI.getInterleaveGroup(Inst);
1486       if (!IG)
1487         continue;
1488 
1489       auto NewIGIter = Old2New.find(IG);
1490       if (NewIGIter == Old2New.end())
1491         Old2New[IG] = new InterleaveGroup<VPInstruction>(
1492             IG->getFactor(), IG->isReverse(), IG->getAlign());
1493 
1494       if (Inst == IG->getInsertPos())
1495         Old2New[IG]->setInsertPos(VPInst);
1496 
1497       InterleaveGroupMap[VPInst] = Old2New[IG];
1498       InterleaveGroupMap[VPInst]->insertMember(
1499           VPInst, IG->getIndex(Inst),
1500           Align(IG->isReverse() ? (-1) * int(IG->getFactor())
1501                                 : IG->getFactor()));
1502     }
1503   } else if (VPRegionBlock *Region = dyn_cast<VPRegionBlock>(Block))
1504     visitRegion(Region, Old2New, IAI);
1505   else
1506     llvm_unreachable("Unsupported kind of VPBlock.");
1507 }
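
// Descriptive note: the traversal above mirrors each IR-level
// InterleaveGroup<Instruction> as an InterleaveGroup<VPInstruction> over the
// corresponding VPInstructions and records the per-member mapping in
// InterleaveGroupMap, so interleave-group queries can be answered on the
// VPlan side using the original grouping.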
1508 
1509 VPInterleavedAccessInfo::VPInterleavedAccessInfo(VPlan &Plan,
1510                                                  InterleavedAccessInfo &IAI) {
1511   Old2NewTy Old2New;
1512   visitRegion(Plan.getVectorLoopRegion(), Old2New, IAI);
1513 }
1514 
1515 void VPSlotTracker::assignName(const VPValue *V) {
1516   assert(!VPValue2Name.contains(V) && "VPValue already has a name!");
1517   auto *UV = V->getUnderlyingValue();
1518   if (!UV) {
1519     VPValue2Name[V] = (Twine("vp<%") + Twine(NextSlot) + ">").str();
1520     NextSlot++;
1521     return;
1522   }
1523 
1524   // Use the name of the underlying Value, wrapped in "ir<>", and versioned by
1525   // appending ".Number" if multiple VPValues map to the same base name.
1526   std::string Name;
1527   raw_string_ostream S(Name);
1528   UV->printAsOperand(S, false);
1529   assert(!Name.empty() && "Name cannot be empty.");
1530   std::string BaseName = (Twine("ir<") + Name + Twine(">")).str();
1531 
1532   // First assign the base name for V.
1533   const auto &[A, _] = VPValue2Name.insert({V, BaseName});
1534   // Integer or FP constants with different types will result in the same string
1535   // due to stripping types.
1536   if (V->isLiveIn() && isa<ConstantInt, ConstantFP>(UV))
1537     return;
1538 
1539   // If the base name is already in use by C > 0 other VPValues, increase the
1540   // version counter C and assign the versioned name "BaseName.C" to V.
1541   const auto &[C, UseInserted] = BaseName2Version.insert({BaseName, 0});
1542   if (!UseInserted) {
1543     C->second++;
1544     A->second = (BaseName + Twine(".") + Twine(C->second)).str();
1545   }
1546 }
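
// Naming examples (hypothetical values): a VPValue without an underlying IR
// value gets a numbered slot such as "vp<%3>"; one wrapping the IR value %iv
// is named "ir<%iv>", and a second VPValue wrapping the same %iv would be
// versioned as "ir<%iv>.1".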
1547 
1548 void VPSlotTracker::assignNames(const VPlan &Plan) {
1549   if (Plan.VFxUF.getNumUsers() > 0)
1550     assignName(&Plan.VFxUF);
1551   assignName(&Plan.VectorTripCount);
1552   if (Plan.BackedgeTakenCount)
1553     assignName(Plan.BackedgeTakenCount);
1554   for (VPValue *LI : Plan.VPLiveInsToFree)
1555     assignName(LI);
1556   assignNames(Plan.getPreheader());
1557 
1558   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<const VPBlockBase *>>
1559       RPOT(VPBlockDeepTraversalWrapper<const VPBlockBase *>(Plan.getEntry()));
1560   for (const VPBasicBlock *VPBB :
1561        VPBlockUtils::blocksOnly<const VPBasicBlock>(RPOT))
1562     assignNames(VPBB);
1563 }
1564 
1565 void VPSlotTracker::assignNames(const VPBasicBlock *VPBB) {
1566   for (const VPRecipeBase &Recipe : *VPBB)
1567     for (VPValue *Def : Recipe.definedValues())
1568       assignName(Def);
1569 }
1570 
1571 std::string VPSlotTracker::getOrCreateName(const VPValue *V) const {
1572   std::string Name = VPValue2Name.lookup(V);
1573   if (!Name.empty())
1574     return Name;
1575 
1576   // If no name was assigned, then either no VPlan was provided when creating
1577   // the slot tracker, or the value is not reachable from the provided VPlan.
1578   // This can happen, e.g., when trying to print a recipe that has not been
1579   // inserted into a VPlan in a debugger.
1580   // TODO: Update VPSlotTracker constructor to assign names to recipes &
1581   // VPValues not associated with a VPlan, instead of constructing names ad-hoc
1582   // here.
1583   const VPRecipeBase *DefR = V->getDefiningRecipe();
1584   (void)DefR;
1585   assert((!DefR || !DefR->getParent() || !DefR->getParent()->getPlan()) &&
1586          "VPValue defined by a recipe in a VPlan?");
1587 
1588   // Use the underlying value's name, if there is one.
1589   if (auto *UV = V->getUnderlyingValue()) {
1590     std::string Name;
1591     raw_string_ostream S(Name);
1592     UV->printAsOperand(S, false);
1593     return (Twine("ir<") + Name + ">").str();
1594   }
1595 
1596   return "<badref>";
1597 }
1598 
1599 bool vputils::onlyFirstLaneUsed(const VPValue *Def) {
1600   return all_of(Def->users(),
1601                 [Def](const VPUser *U) { return U->onlyFirstLaneUsed(Def); });
1602 }
1603 
1604 bool vputils::onlyFirstPartUsed(const VPValue *Def) {
1605   return all_of(Def->users(),
1606                 [Def](const VPUser *U) { return U->onlyFirstPartUsed(Def); });
1607 }
1608 
1609 VPValue *vputils::getOrCreateVPValueForSCEVExpr(VPlan &Plan, const SCEV *Expr,
1610                                                 ScalarEvolution &SE) {
1611   if (auto *Expanded = Plan.getSCEVExpansion(Expr))
1612     return Expanded;
1613   VPValue *Expanded = nullptr;
1614   if (auto *E = dyn_cast<SCEVConstant>(Expr))
1615     Expanded = Plan.getOrAddLiveIn(E->getValue());
1616   else if (auto *E = dyn_cast<SCEVUnknown>(Expr))
1617     Expanded = Plan.getOrAddLiveIn(E->getValue());
1618   else {
1619     Expanded = new VPExpandSCEVRecipe(Expr, SE);
1620     Plan.getPreheader()->appendRecipe(Expanded->getDefiningRecipe());
1621   }
1622   Plan.addSCEVExpansion(Expr, Expanded);
1623   return Expanded;
1624 }
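
// Behaviour sketch (hypothetical SCEVs): a SCEVConstant such as (16) or a
// SCEVUnknown wrapping %n becomes a plan live-in directly; a more complex
// expression such as (4 + %n) is materialised by a VPExpandSCEVRecipe
// appended to the preheader. In all cases the result is cached via
// addSCEVExpansion.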
1625 
1626 bool vputils::isHeaderMask(VPValue *V, VPlan &Plan) {
1627   if (isa<VPActiveLaneMaskPHIRecipe>(V))
1628     return true;
1629 
1630   auto IsWideCanonicalIV = [](VPValue *A) {
1631     return isa<VPWidenCanonicalIVRecipe>(A) ||
1632            (isa<VPWidenIntOrFpInductionRecipe>(A) &&
1633             cast<VPWidenIntOrFpInductionRecipe>(A)->isCanonical());
1634   };
1635 
1636   VPValue *A, *B;
1637   if (match(V, m_ActiveLaneMask(m_VPValue(A), m_VPValue(B))))
1638     return B == Plan.getTripCount() &&
1639            (match(A, m_ScalarIVSteps(m_CanonicalIV(), m_SpecificInt(1))) ||
1640             IsWideCanonicalIV(A));
1641 
1642   return match(V, m_Binary<Instruction::ICmp>(m_VPValue(A), m_VPValue(B))) &&
1643          IsWideCanonicalIV(A) && B == Plan.getOrCreateBackedgeTakenCount();
1644 }
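
// Patterns recognised as header masks above, informally:
//   - an active-lane-mask phi recipe,
//   - active.lane.mask(wide canonical IV or scalar-steps(canonical IV, 1),
//                      trip count),
//   - icmp(wide canonical IV, backedge-taken count).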
1645