1 //===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file contains implementations for different VPlan recipes.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "VPlan.h"
15 #include "VPlanAnalysis.h"
16 #include "VPlanUtils.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/Analysis/IVDescriptors.h"
21 #include "llvm/IR/BasicBlock.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/IR/Value.h"
28 #include "llvm/IR/VectorBuilder.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/raw_ostream.h"
33 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
34 #include "llvm/Transforms/Utils/LoopUtils.h"
35 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 using VectorParts = SmallVector<Value *, 2>;
41 
42 namespace llvm {
43 extern cl::opt<bool> EnableVPlanNativePath;
44 }
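// Command-line option that overrides the cost of every instruction; honored
// below when computing recipe costs.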
45 extern cl::opt<unsigned> ForceTargetInstructionCost;
46 
47 #define LV_NAME "loop-vectorize"
48 #define DEBUG_TYPE LV_NAME
49 
50 bool VPRecipeBase::mayWriteToMemory() const {
51   switch (getVPDefID()) {
52   case VPInstructionSC:
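    // VPInstructions wrapping plain binary opcodes never write to memory; for
    // the remaining opcodes, conservatively assume a write unless listed below.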
53     if (Instruction::isBinaryOp(cast<VPInstruction>(this)->getOpcode()))
54       return false;
55     switch (cast<VPInstruction>(this)->getOpcode()) {
56     case Instruction::Or:
57     case Instruction::ICmp:
58     case Instruction::Select:
59     case VPInstruction::Not:
60     case VPInstruction::CalculateTripCountMinusVF:
61     case VPInstruction::CanonicalIVIncrementForPart:
62     case VPInstruction::ExtractFromEnd:
63     case VPInstruction::FirstOrderRecurrenceSplice:
64     case VPInstruction::LogicalAnd:
65     case VPInstruction::PtrAdd:
66       return false;
67     default:
68       return true;
69     }
70   case VPInterleaveSC:
71     return cast<VPInterleaveRecipe>(this)->getNumStoreOperands() > 0;
72   case VPWidenStoreEVLSC:
73   case VPWidenStoreSC:
74     return true;
75   case VPReplicateSC:
76     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
77         ->mayWriteToMemory();
78   case VPWidenCallSC:
79     return !cast<VPWidenCallRecipe>(this)
80                 ->getCalledScalarFunction()
81                 ->onlyReadsMemory();
82   case VPBranchOnMaskSC:
83   case VPScalarIVStepsSC:
84   case VPPredInstPHISC:
85     return false;
86   case VPBlendSC:
87   case VPReductionEVLSC:
88   case VPReductionSC:
89   case VPWidenCanonicalIVSC:
90   case VPWidenCastSC:
91   case VPWidenGEPSC:
92   case VPWidenIntOrFpInductionSC:
93   case VPWidenLoadEVLSC:
94   case VPWidenLoadSC:
95   case VPWidenPHISC:
96   case VPWidenSC:
97   case VPWidenEVLSC:
98   case VPWidenSelectSC: {
99     const Instruction *I =
100         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
101     (void)I;
102     assert((!I || !I->mayWriteToMemory()) &&
103            "underlying instruction may write to memory");
104     return false;
105   }
106   default:
107     return true;
108   }
109 }
110 
111 bool VPRecipeBase::mayReadFromMemory() const {
112   switch (getVPDefID()) {
113   case VPWidenLoadEVLSC:
114   case VPWidenLoadSC:
115     return true;
116   case VPReplicateSC:
117     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
118         ->mayReadFromMemory();
119   case VPWidenCallSC:
120     return !cast<VPWidenCallRecipe>(this)
121                 ->getCalledScalarFunction()
122                 ->onlyWritesMemory();
123   case VPBranchOnMaskSC:
124   case VPPredInstPHISC:
125   case VPScalarIVStepsSC:
126   case VPWidenStoreEVLSC:
127   case VPWidenStoreSC:
128     return false;
129   case VPBlendSC:
130   case VPReductionEVLSC:
131   case VPReductionSC:
132   case VPWidenCanonicalIVSC:
133   case VPWidenCastSC:
134   case VPWidenGEPSC:
135   case VPWidenIntOrFpInductionSC:
136   case VPWidenPHISC:
137   case VPWidenSC:
138   case VPWidenEVLSC:
139   case VPWidenSelectSC: {
140     const Instruction *I =
141         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
142     (void)I;
143     assert((!I || !I->mayReadFromMemory()) &&
144            "underlying instruction may read from memory");
145     return false;
146   }
147   default:
148     return true;
149   }
150 }
151 
152 bool VPRecipeBase::mayHaveSideEffects() const {
153   switch (getVPDefID()) {
154   case VPDerivedIVSC:
155   case VPPredInstPHISC:
156   case VPScalarCastSC:
157     return false;
158   case VPInstructionSC:
159     return mayWriteToMemory();
160   case VPWidenCallSC: {
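    // A widened call has side effects if the callee may write to memory, may
    // throw, or may not return.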
161     Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
162     return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
163   }
164   case VPBlendSC:
165   case VPReductionEVLSC:
166   case VPReductionSC:
167   case VPScalarIVStepsSC:
168   case VPWidenCanonicalIVSC:
169   case VPWidenCastSC:
170   case VPWidenGEPSC:
171   case VPWidenIntOrFpInductionSC:
172   case VPWidenPHISC:
173   case VPWidenPointerInductionSC:
174   case VPWidenSC:
175   case VPWidenEVLSC:
176   case VPWidenSelectSC: {
177     const Instruction *I =
178         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
179     (void)I;
180     assert((!I || !I->mayHaveSideEffects()) &&
181            "underlying instruction has side-effects");
182     return false;
183   }
184   case VPInterleaveSC:
185     return mayWriteToMemory();
186   case VPWidenLoadEVLSC:
187   case VPWidenLoadSC:
188   case VPWidenStoreEVLSC:
189   case VPWidenStoreSC:
190     assert(
191         cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
192             mayWriteToMemory() &&
193         "mayHaveSideEffects result for ingredient differs from this "
194         "implementation");
195     return mayWriteToMemory();
196   case VPReplicateSC: {
197     auto *R = cast<VPReplicateRecipe>(this);
198     return R->getUnderlyingInstr()->mayHaveSideEffects();
199   }
200   default:
201     return true;
202   }
203 }
204 
205 void VPLiveOut::fixPhi(VPlan &Plan, VPTransformState &State) {
206   VPValue *ExitValue = getOperand(0);
207   VPBasicBlock *MiddleVPBB =
208       cast<VPBasicBlock>(Plan.getVectorLoopRegion()->getSingleSuccessor());
209   VPRecipeBase *ExitingRecipe = ExitValue->getDefiningRecipe();
210   auto *ExitingVPBB = ExitingRecipe ? ExitingRecipe->getParent() : nullptr;
211   // Values leaving the vector loop reach live-out phis in the exit block
212   // via the middle block.
213   auto *PredVPBB = !ExitingVPBB || ExitingVPBB->getEnclosingLoopRegion()
214                        ? MiddleVPBB
215                        : ExitingVPBB;
216   BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
217   Value *V = State.get(ExitValue, VPLane(0));
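  // If the phi already has an incoming value for PredBB, update it; otherwise
  // add a new incoming (value, block) pair.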
218   if (Phi->getBasicBlockIndex(PredBB) != -1)
219     Phi->setIncomingValueForBlock(PredBB, V);
220   else
221     Phi->addIncoming(V, PredBB);
222 }
223 
224 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
225 void VPLiveOut::print(raw_ostream &O, VPSlotTracker &SlotTracker) const {
226   O << "Live-out ";
227   getPhi()->printAsOperand(O);
228   O << " = ";
229   getOperand(0)->printAsOperand(O, SlotTracker);
230   O << "\n";
231 }
232 #endif
233 
234 void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
235   assert(!Parent && "Recipe already in some VPBasicBlock");
236   assert(InsertPos->getParent() &&
237          "Insertion position not in any VPBasicBlock");
238   InsertPos->getParent()->insert(this, InsertPos->getIterator());
239 }
240 
241 void VPRecipeBase::insertBefore(VPBasicBlock &BB,
242                                 iplist<VPRecipeBase>::iterator I) {
243   assert(!Parent && "Recipe already in some VPBasicBlock");
244   assert(I == BB.end() || I->getParent() == &BB);
245   BB.insert(this, I);
246 }
247 
248 void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
249   assert(!Parent && "Recipe already in some VPBasicBlock");
250   assert(InsertPos->getParent() &&
251          "Insertion position not in any VPBasicBlock");
252   InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
253 }
254 
255 void VPRecipeBase::removeFromParent() {
256   assert(getParent() && "Recipe not in any VPBasicBlock");
257   getParent()->getRecipeList().remove(getIterator());
258   Parent = nullptr;
259 }
260 
261 iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
262   assert(getParent() && "Recipe not in any VPBasicBlock");
263   return getParent()->getRecipeList().erase(getIterator());
264 }
265 
266 void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
267   removeFromParent();
268   insertAfter(InsertPos);
269 }
270 
271 void VPRecipeBase::moveBefore(VPBasicBlock &BB,
272                               iplist<VPRecipeBase>::iterator I) {
273   removeFromParent();
274   insertBefore(BB, I);
275 }
276 
277 /// Return the underlying instruction to be used for computing \p R's cost via
278 /// the legacy cost model. Return nullptr if there's no suitable instruction.
279 static Instruction *getInstructionForCost(const VPRecipeBase *R) {
280   if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
281     return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
282   if (auto *IG = dyn_cast<VPInterleaveRecipe>(R))
283     return IG->getInsertPos();
284   // Currently the legacy cost model only computes costs for recipes that have
285   // an underlying instruction. Removing the WidenMem case here would prevent
286   // force-target-instruction-cost from overriding the cost of recipes with an
287   // underlying instruction, which would be inconsistent with the legacy model.
288   // TODO: Remove WidenMem from this function when we don't need to compare to
289   // the legacy model.
290   if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
291     return &WidenMem->getIngredient();
292   return nullptr;
293 }
294 
295 InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
296   auto *UI = getInstructionForCost(this);
297   if (UI && Ctx.skipCostComputation(UI, VF.isVector()))
298     return 0;
299 
300   InstructionCost RecipeCost = computeCost(VF, Ctx);
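  // Override the computed cost with -force-target-instruction-cost when it is
  // set, for recipes with an underlying instruction and a valid cost.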
301   if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
302       RecipeCost.isValid())
303     RecipeCost = InstructionCost(ForceTargetInstructionCost);
304 
305   LLVM_DEBUG({
306     dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
307     dump();
308   });
309   return RecipeCost;
310 }
311 
312 InstructionCost VPRecipeBase::computeCost(ElementCount VF,
313                                           VPCostContext &Ctx) const {
314   // Compute the cost of the recipe by falling back to the legacy cost model,
315   // using the underlying instruction. If there is no underlying instruction,
316   // return 0.
317   Instruction *UI = getInstructionForCost(this);
318   if (UI && isa<VPReplicateRecipe>(this)) {
319     // A VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
320     // transform; avoid computing its cost multiple times for now.
321     Ctx.SkipCostComputation.insert(UI);
322   }
323   return UI ? Ctx.getLegacyCost(UI, VF) : 0;
324 }
325 
326 FastMathFlags VPRecipeWithIRFlags::getFastMathFlags() const {
327   assert(OpType == OperationType::FPMathOp &&
328          "recipe doesn't have fast math flags");
329   FastMathFlags Res;
330   Res.setAllowReassoc(FMFs.AllowReassoc);
331   Res.setNoNaNs(FMFs.NoNaNs);
332   Res.setNoInfs(FMFs.NoInfs);
333   Res.setNoSignedZeros(FMFs.NoSignedZeros);
334   Res.setAllowReciprocal(FMFs.AllowReciprocal);
335   Res.setAllowContract(FMFs.AllowContract);
336   Res.setApproxFunc(FMFs.ApproxFunc);
337   return Res;
338 }
339 
340 template <unsigned PartOpIdx>
341 VPValue *
342 VPUnrollPartAccessor<PartOpIdx>::getUnrollPartOperand(VPUser &U) const {
343   if (U.getNumOperands() == PartOpIdx + 1)
344     return U.getOperand(PartOpIdx);
345   return nullptr;
346 }
347 
348 template <unsigned PartOpIdx>
349 unsigned VPUnrollPartAccessor<PartOpIdx>::getUnrollPart(VPUser &U) const {
350   if (auto *UnrollPartOp = getUnrollPartOperand(U))
351     return cast<ConstantInt>(UnrollPartOp->getLiveInIRValue())->getZExtValue();
352   return 0;
353 }
354 
355 VPInstruction::VPInstruction(unsigned Opcode, CmpInst::Predicate Pred,
356                              VPValue *A, VPValue *B, DebugLoc DL,
357                              const Twine &Name)
358     : VPRecipeWithIRFlags(VPDef::VPInstructionSC, ArrayRef<VPValue *>({A, B}),
359                           Pred, DL),
360       Opcode(Opcode), Name(Name.str()) {
361   assert(Opcode == Instruction::ICmp &&
362          "only ICmp predicates supported at the moment");
363 }
364 
365 VPInstruction::VPInstruction(unsigned Opcode,
366                              std::initializer_list<VPValue *> Operands,
367                              FastMathFlags FMFs, DebugLoc DL, const Twine &Name)
368     : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, FMFs, DL),
369       Opcode(Opcode), Name(Name.str()) {
370   // Make sure the VPInstruction is a floating-point operation.
371   assert(isFPMathOp() && "this op can't take fast-math flags");
372 }
373 
374 bool VPInstruction::doesGeneratePerAllLanes() const {
375   return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
376 }
377 
378 bool VPInstruction::canGenerateScalarForFirstLane() const {
379   if (Instruction::isBinaryOp(getOpcode()))
380     return true;
381   if (isSingleScalar() || isVectorToScalar())
382     return true;
383   switch (Opcode) {
384   case Instruction::ICmp:
385   case VPInstruction::BranchOnCond:
386   case VPInstruction::BranchOnCount:
387   case VPInstruction::CalculateTripCountMinusVF:
388   case VPInstruction::CanonicalIVIncrementForPart:
389   case VPInstruction::PtrAdd:
390   case VPInstruction::ExplicitVectorLength:
391     return true;
392   default:
393     return false;
394   }
395 }
396 
397 Value *VPInstruction::generatePerLane(VPTransformState &State,
398                                       const VPLane &Lane) {
399   IRBuilderBase &Builder = State.Builder;
400 
401   assert(getOpcode() == VPInstruction::PtrAdd &&
402          "only PtrAdd opcodes are supported for now");
403   return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
404                               State.get(getOperand(1), Lane), Name);
405 }
406 
407 Value *VPInstruction::generate(VPTransformState &State) {
408   IRBuilderBase &Builder = State.Builder;
409 
410   if (Instruction::isBinaryOp(getOpcode())) {
411     bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
412     Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
413     Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
414     auto *Res =
415         Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
416     if (auto *I = dyn_cast<Instruction>(Res))
417       setFlags(I);
418     return Res;
419   }
420 
421   switch (getOpcode()) {
422   case VPInstruction::Not: {
423     Value *A = State.get(getOperand(0));
424     return Builder.CreateNot(A, Name);
425   }
426   case Instruction::ICmp: {
427     bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
428     Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
429     Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
430     return Builder.CreateCmp(getPredicate(), A, B, Name);
431   }
432   case Instruction::Select: {
433     Value *Cond = State.get(getOperand(0));
434     Value *Op1 = State.get(getOperand(1));
435     Value *Op2 = State.get(getOperand(2));
436     return Builder.CreateSelect(Cond, Op1, Op2, Name);
437   }
438   case VPInstruction::ActiveLaneMask: {
439     // Get first lane of vector induction variable.
440     Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
441     // Get the original loop tripcount.
442     Value *ScalarTC = State.get(getOperand(1), VPLane(0));
443 
444     // If this part of the active lane mask is scalar, generate the CMP directly
445     // to avoid unnecessary extracts.
446     if (State.VF.isScalar())
447       return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
448                                Name);
449 
450     auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
451     auto *PredTy = VectorType::get(Int1Ty, State.VF);
452     return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
453                                    {PredTy, ScalarTC->getType()},
454                                    {VIVElem0, ScalarTC}, nullptr, Name);
455   }
456   case VPInstruction::FirstOrderRecurrenceSplice: {
457     // Generate code to combine the previous and current values in vector v3.
458     //
459     //   vector.ph:
460     //     v_init = vector(..., ..., ..., a[-1])
461     //     br vector.body
462     //
463     //   vector.body
464     //     i = phi [0, vector.ph], [i+4, vector.body]
465     //     v1 = phi [v_init, vector.ph], [v2, vector.body]
466     //     v2 = a[i, i+1, i+2, i+3];
467     //     v3 = vector(v1(3), v2(0, 1, 2))
468 
469     auto *V1 = State.get(getOperand(0));
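    // With a scalar VF the operand is not a vector; there is nothing to splice.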
470     if (!V1->getType()->isVectorTy())
471       return V1;
472     Value *V2 = State.get(getOperand(1));
473     return Builder.CreateVectorSplice(V1, V2, -1, Name);
474   }
475   case VPInstruction::CalculateTripCountMinusVF: {
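    // Compute "TC > Step ? TC - Step : 0" with Step = VF * UF, clamping the
    // result to 0 when the trip count is not larger than Step.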
476     unsigned UF = getParent()->getPlan()->getUF();
477     Value *ScalarTC = State.get(getOperand(0), VPLane(0));
478     Value *Step = createStepForVF(Builder, ScalarTC->getType(), State.VF, UF);
479     Value *Sub = Builder.CreateSub(ScalarTC, Step);
480     Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
481     Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
482     return Builder.CreateSelect(Cmp, Sub, Zero);
483   }
484   case VPInstruction::ExplicitVectorLength: {
485     // TODO: Restructure this code with an explicit remainder loop; vsetvli can
486     // then be outside of the main loop.
487     Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
488     // Compute EVL
489     assert(AVL->getType()->isIntegerTy() &&
490            "Requested vector length should be an integer.");
491 
492     assert(State.VF.isScalable() && "Expected scalable vector factor.");
493     Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());
494 
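    // Query the target via llvm.experimental.get.vector.length; the trailing
    // i1 true indicates that the requested vector length is scalable.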
495     Value *EVL = State.Builder.CreateIntrinsic(
496         State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
497         {AVL, VFArg, State.Builder.getTrue()});
498     return EVL;
499   }
500   case VPInstruction::CanonicalIVIncrementForPart: {
501     unsigned Part = getUnrollPart(*this);
502     auto *IV = State.get(getOperand(0), VPLane(0));
503     assert(Part != 0 && "Must have a positive part");
504     // The canonical IV is incremented by the vectorization factor (number of
505     // SIMD elements) times the unroll part.
506     Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
507     return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
508                              hasNoSignedWrap());
509   }
510   case VPInstruction::BranchOnCond: {
511     Value *Cond = State.get(getOperand(0), VPLane(0));
512     // Replace the temporary unreachable terminator with a new conditional
513     // branch, hooking it up to the backward destination for exiting blocks now
514     // and to the forward destination(s) later when they are created.
515     BranchInst *CondBr =
516         Builder.CreateCondBr(Cond, Builder.GetInsertBlock(), nullptr);
517     CondBr->setSuccessor(0, nullptr);
518     Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
519 
520     if (!getParent()->isExiting())
521       return CondBr;
522 
523     VPRegionBlock *ParentRegion = getParent()->getParent();
524     VPBasicBlock *Header = ParentRegion->getEntryBasicBlock();
525     CondBr->setSuccessor(1, State.CFG.VPBB2IRBB[Header]);
526     return CondBr;
527   }
528   case VPInstruction::BranchOnCount: {
529     // First create the compare.
530     Value *IV = State.get(getOperand(0), /*IsScalar*/ true);
531     Value *TC = State.get(getOperand(1), /*IsScalar*/ true);
532     Value *Cond = Builder.CreateICmpEQ(IV, TC);
533 
534     // Now create the branch.
535     auto *Plan = getParent()->getPlan();
536     VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
537     VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
538 
539     // Replace the temporary unreachable terminator with a new conditional
540     // branch, hooking it up to the backward destination (the header) now and to
541     // the forward destination (the exit/middle block) later when it is created.
542     // Note that CreateCondBr expects a valid BB as first argument, so we need
543     // to set it to nullptr later.
544     BranchInst *CondBr = Builder.CreateCondBr(Cond, Builder.GetInsertBlock(),
545                                               State.CFG.VPBB2IRBB[Header]);
546     CondBr->setSuccessor(0, nullptr);
547     Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
548     return CondBr;
549   }
550   case VPInstruction::ComputeReductionResult: {
551     // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
552     // and will be removed by breaking up the recipe further.
553     auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
554     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
555     // Get its reduction variable descriptor.
556     const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
557 
558     RecurKind RK = RdxDesc.getRecurrenceKind();
559 
560     Type *PhiTy = OrigPhi->getType();
561     // The recipe's operands are the reduction phi, followed by one operand for
562     // each part of the reduction.
563     unsigned UF = getNumOperands() - 1;
564     VectorParts RdxParts(UF);
565     for (unsigned Part = 0; Part < UF; ++Part)
566       RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
567 
568     // If the vector reduction can be performed in a smaller type, we truncate
569     // then extend the loop exit value to enable InstCombine to evaluate the
570     // entire expression in the smaller type.
571     // TODO: Handle this in truncateToMinBW.
572     if (State.VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
573       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), State.VF);
574       for (unsigned Part = 0; Part < UF; ++Part)
575         RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
576     }
577     // Reduce all of the unrolled parts into a single vector.
578     Value *ReducedPartRdx = RdxParts[0];
579     unsigned Op = RecurrenceDescriptor::getOpcode(RK);
580     if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK))
581       Op = Instruction::Or;
582 
583     if (PhiR->isOrdered()) {
584       ReducedPartRdx = RdxParts[UF - 1];
585     } else {
586       // Floating-point operations should have some FMF to enable the reduction.
587       IRBuilderBase::FastMathFlagGuard FMFG(Builder);
588       Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
589       for (unsigned Part = 1; Part < UF; ++Part) {
590         Value *RdxPart = RdxParts[Part];
591         if (Op != Instruction::ICmp && Op != Instruction::FCmp)
592           ReducedPartRdx = Builder.CreateBinOp(
593               (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
594         else
595           ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
596       }
597     }
598 
599     // Create the reduction after the loop. Note that in-loop reductions create
600     // the target reduction in the loop using a Reduction recipe.
601     if ((State.VF.isVector() ||
602          RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) &&
603         !PhiR->isInLoop()) {
604       ReducedPartRdx =
605           createReduction(Builder, RdxDesc, ReducedPartRdx, OrigPhi);
606       // If the reduction can be performed in a smaller type, we need to extend
607       // the reduction to the wider type before we branch to the original loop.
608       if (PhiTy != RdxDesc.getRecurrenceType())
609         ReducedPartRdx = RdxDesc.isSigned()
610                              ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
611                              : Builder.CreateZExt(ReducedPartRdx, PhiTy);
612     }
613 
614     return ReducedPartRdx;
615   }
616   case VPInstruction::ExtractFromEnd: {
617     auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
618     unsigned Offset = CI->getZExtValue();
619     assert(Offset > 0 && "Offset from end must be positive");
620     Value *Res;
621     if (State.VF.isVector()) {
622       assert(Offset <= State.VF.getKnownMinValue() &&
623              "invalid offset to extract from");
624       // Extract lane VF - Offset from the operand.
625       Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
626     } else {
627       assert(Offset <= 1 && "invalid offset to extract from");
628       Res = State.get(getOperand(0));
629     }
630     if (isa<ExtractElementInst>(Res))
631       Res->setName(Name);
632     return Res;
633   }
634   case VPInstruction::LogicalAnd: {
635     Value *A = State.get(getOperand(0));
636     Value *B = State.get(getOperand(1));
637     return Builder.CreateLogicalAnd(A, B, Name);
638   }
639   case VPInstruction::PtrAdd: {
640     assert(vputils::onlyFirstLaneUsed(this) &&
641            "can only generate first lane for PtrAdd");
642     Value *Ptr = State.get(getOperand(0), /* IsScalar */ true);
643     Value *Addend = State.get(getOperand(1), /* IsScalar */ true);
644     return isInBounds() ? Builder.CreateInBoundsPtrAdd(Ptr, Addend, Name)
645                         : Builder.CreatePtrAdd(Ptr, Addend, Name);
646   }
647   case VPInstruction::ResumePhi: {
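    // Create a phi that takes operand 0 from the VPlan-modelled predecessor
    // and operand 1 from all other, already created predecessors of the block.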
648     Value *IncomingFromVPlanPred =
649         State.get(getOperand(0), /* IsScalar */ true);
650     Value *IncomingFromOtherPreds =
651         State.get(getOperand(1), /* IsScalar */ true);
652     auto *NewPhi =
653         Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
654     BasicBlock *VPlanPred =
655         State.CFG
656             .VPBB2IRBB[cast<VPBasicBlock>(getParent()->getSinglePredecessor())];
657     NewPhi->addIncoming(IncomingFromVPlanPred, VPlanPred);
658     for (auto *OtherPred : predecessors(Builder.GetInsertBlock())) {
659       assert(OtherPred != VPlanPred &&
660              "VPlan predecessors should not be connected yet");
661       NewPhi->addIncoming(IncomingFromOtherPreds, OtherPred);
662     }
663     return NewPhi;
664   }
665 
666   default:
667     llvm_unreachable("Unsupported opcode for instruction");
668   }
669 }
670 
671 bool VPInstruction::isVectorToScalar() const {
672   return getOpcode() == VPInstruction::ExtractFromEnd ||
673          getOpcode() == VPInstruction::ComputeReductionResult;
674 }
675 
676 bool VPInstruction::isSingleScalar() const {
677   return getOpcode() == VPInstruction::ResumePhi;
678 }
679 
680 #if !defined(NDEBUG)
681 bool VPInstruction::isFPMathOp() const {
682   // Inspired by FPMathOperator::classof. A notable difference is that we don't
683   // support Call and PHI opcodes here yet.
684   return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
685          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
686          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
687          Opcode == Instruction::FCmp || Opcode == Instruction::Select;
688 }
689 #endif
690 
691 void VPInstruction::execute(VPTransformState &State) {
692   assert(!State.Lane && "VPInstruction executing a Lane");
693   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
694   assert((hasFastMathFlags() == isFPMathOp() ||
695           getOpcode() == Instruction::Select) &&
696          "Recipe not an FPMathOp but has fast-math flags?");
697   if (hasFastMathFlags())
698     State.Builder.setFastMathFlags(getFastMathFlags());
699   State.setDebugLocFrom(getDebugLoc());
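  // Only generate the first lane when the opcode supports it and either only
  // the first lane of the result is used or the result is inherently scalar.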
700   bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
701                                    (vputils::onlyFirstLaneUsed(this) ||
702                                     isVectorToScalar() || isSingleScalar());
703   bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
704   if (GeneratesPerAllLanes) {
705     for (unsigned Lane = 0, NumLanes = State.VF.getKnownMinValue();
706          Lane != NumLanes; ++Lane) {
707       Value *GeneratedValue = generatePerLane(State, VPLane(Lane));
708       assert(GeneratedValue && "generatePerLane must produce a value");
709       State.set(this, GeneratedValue, VPLane(Lane));
710     }
711     return;
712   }
713 
714   Value *GeneratedValue = generate(State);
715   if (!hasResult())
716     return;
717   assert(GeneratedValue && "generate must produce a value");
718   assert(
719       (GeneratedValue->getType()->isVectorTy() == !GeneratesPerFirstLaneOnly ||
720        State.VF.isScalar()) &&
721       "scalar value but not only first lane defined");
722   State.set(this, GeneratedValue,
723             /*IsScalar*/ GeneratesPerFirstLaneOnly);
724 }
725 
726 bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
727   assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
728   if (Instruction::isBinaryOp(getOpcode()))
729     return vputils::onlyFirstLaneUsed(this);
730 
731   switch (getOpcode()) {
732   default:
733     return false;
734   case Instruction::ICmp:
735   case VPInstruction::PtrAdd:
736     // TODO: Cover additional opcodes.
737     return vputils::onlyFirstLaneUsed(this);
738   case VPInstruction::ActiveLaneMask:
739   case VPInstruction::ExplicitVectorLength:
740   case VPInstruction::CalculateTripCountMinusVF:
741   case VPInstruction::CanonicalIVIncrementForPart:
742   case VPInstruction::BranchOnCount:
743   case VPInstruction::BranchOnCond:
744   case VPInstruction::ResumePhi:
745     return true;
746   };
747   llvm_unreachable("switch should return");
748 }
749 
750 bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
751   assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
752   if (Instruction::isBinaryOp(getOpcode()))
753     return vputils::onlyFirstPartUsed(this);
754 
755   switch (getOpcode()) {
756   default:
757     return false;
758   case Instruction::ICmp:
759   case Instruction::Select:
760     return vputils::onlyFirstPartUsed(this);
761   case VPInstruction::BranchOnCount:
762   case VPInstruction::BranchOnCond:
763   case VPInstruction::CanonicalIVIncrementForPart:
764     return true;
765   };
766   llvm_unreachable("switch should return");
767 }
768 
769 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
770 void VPInstruction::dump() const {
771   VPSlotTracker SlotTracker(getParent()->getPlan());
772   print(dbgs(), "", SlotTracker);
773 }
774 
775 void VPInstruction::print(raw_ostream &O, const Twine &Indent,
776                           VPSlotTracker &SlotTracker) const {
777   O << Indent << "EMIT ";
778 
779   if (hasResult()) {
780     printAsOperand(O, SlotTracker);
781     O << " = ";
782   }
783 
784   switch (getOpcode()) {
785   case VPInstruction::Not:
786     O << "not";
787     break;
788   case VPInstruction::SLPLoad:
789     O << "combined load";
790     break;
791   case VPInstruction::SLPStore:
792     O << "combined store";
793     break;
794   case VPInstruction::ActiveLaneMask:
795     O << "active lane mask";
796     break;
797   case VPInstruction::ResumePhi:
798     O << "resume-phi";
799     break;
800   case VPInstruction::ExplicitVectorLength:
801     O << "EXPLICIT-VECTOR-LENGTH";
802     break;
803   case VPInstruction::FirstOrderRecurrenceSplice:
804     O << "first-order splice";
805     break;
806   case VPInstruction::BranchOnCond:
807     O << "branch-on-cond";
808     break;
809   case VPInstruction::CalculateTripCountMinusVF:
810     O << "TC > VF ? TC - VF : 0";
811     break;
812   case VPInstruction::CanonicalIVIncrementForPart:
813     O << "VF * Part +";
814     break;
815   case VPInstruction::BranchOnCount:
816     O << "branch-on-count";
817     break;
818   case VPInstruction::ExtractFromEnd:
819     O << "extract-from-end";
820     break;
821   case VPInstruction::ComputeReductionResult:
822     O << "compute-reduction-result";
823     break;
824   case VPInstruction::LogicalAnd:
825     O << "logical-and";
826     break;
827   case VPInstruction::PtrAdd:
828     O << "ptradd";
829     break;
830   default:
831     O << Instruction::getOpcodeName(getOpcode());
832   }
833 
834   printFlags(O);
835   printOperands(O, SlotTracker);
836 
837   if (auto DL = getDebugLoc()) {
838     O << ", !dbg ";
839     DL.print(O);
840   }
841 }
842 #endif
843 
844 void VPIRInstruction::execute(VPTransformState &State) {
845   assert((isa<PHINode>(&I) || getNumOperands() == 0) &&
846          "Only PHINodes can have extra operands");
847   if (getNumOperands() == 1) {
848     VPValue *ExitValue = getOperand(0);
849     auto Lane = vputils::isUniformAfterVectorization(ExitValue)
850                     ? VPLane::getFirstLane()
851                     : VPLane::getLastLaneForVF(State.VF);
852     auto *PredVPBB = cast<VPBasicBlock>(getParent()->getSinglePredecessor());
853     BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
854     // Set insertion point in PredBB in case an extract needs to be generated.
855     // TODO: Model extracts explicitly.
856     State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
857     Value *V = State.get(ExitValue, VPLane(Lane));
858     auto *Phi = cast<PHINode>(&I);
859     Phi->addIncoming(V, PredBB);
860   }
861 
862   // Advance the insert point after the wrapped IR instruction. This allows
863   // interleaving VPIRInstructions and other recipes.
864   State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
865 }
866 
867 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
868 void VPIRInstruction::print(raw_ostream &O, const Twine &Indent,
869                             VPSlotTracker &SlotTracker) const {
870   O << Indent << "IR " << I;
871 
872   if (getNumOperands() != 0) {
873     assert(getNumOperands() == 1 && "can have at most 1 operand");
874     O << " (extra operand: ";
875     printOperands(O, SlotTracker);
876     O << ")";
877   }
878 }
879 #endif
880 
881 void VPWidenCallRecipe::execute(VPTransformState &State) {
882   assert(State.VF.isVector() && "not widening");
883   Function *CalledScalarFn = getCalledScalarFunction();
884   assert(!isDbgInfoIntrinsic(CalledScalarFn->getIntrinsicID()) &&
885          "DbgInfoIntrinsic should have been dropped during VPlan construction");
886   State.setDebugLocFrom(getDebugLoc());
887 
888   bool UseIntrinsic = VectorIntrinsicID != Intrinsic::not_intrinsic;
889   FunctionType *VFTy = nullptr;
890   if (Variant)
891     VFTy = Variant->getFunctionType();
892   SmallVector<Type *, 2> TysForDecl;
893   // Add return type if intrinsic is overloaded on it.
894   if (UseIntrinsic &&
895       isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1))
896     TysForDecl.push_back(VectorType::get(
897         CalledScalarFn->getReturnType()->getScalarType(), State.VF));
898   SmallVector<Value *, 4> Args;
899   for (const auto &I : enumerate(arg_operands())) {
900     // Some intrinsics have a scalar argument - don't replace it with a
901     // vector.
902     Value *Arg;
903     if (UseIntrinsic &&
904         isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
905       Arg = State.get(I.value(), VPLane(0));
906     // Some vectorized function variants may also take a scalar argument,
907     // e.g. linear parameters for pointers. This needs to be the scalar value
908     // from the start of the respective part when interleaving.
909     else if (VFTy && !VFTy->getParamType(I.index())->isVectorTy())
910       Arg = State.get(I.value(), VPLane(0));
911     else
912       Arg = State.get(I.value());
913     if (UseIntrinsic &&
914         isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index()))
915       TysForDecl.push_back(Arg->getType());
916     Args.push_back(Arg);
917   }
918 
919   Function *VectorF;
920   if (UseIntrinsic) {
921     // Use vector version of the intrinsic.
922     Module *M = State.Builder.GetInsertBlock()->getModule();
923     VectorF = Intrinsic::getDeclaration(M, VectorIntrinsicID, TysForDecl);
924     assert(VectorF && "Can't retrieve vector intrinsic.");
925   } else {
927     assert(Variant != nullptr && "Can't create vector function.");
929     VectorF = Variant;
930   }
931 
932   auto *CI = cast_or_null<CallInst>(getUnderlyingInstr());
933   SmallVector<OperandBundleDef, 1> OpBundles;
934   if (CI)
935     CI->getOperandBundlesAsDefs(OpBundles);
936 
937   CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
938 
939   setFlags(V);
940 
941   if (!V->getType()->isVoidTy())
942     State.set(this, V);
943   State.addMetadata(V, CI);
944 }
945 
946 InstructionCost VPWidenCallRecipe::computeCost(ElementCount VF,
947                                                VPCostContext &Ctx) const {
948   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
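  // If a vector library function variant was selected, cost this recipe as a
  // direct call to that variant.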
949   if (Variant) {
950     return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
951                                     Variant->getFunctionType()->params(),
952                                     CostKind);
953   }
954 
955   // Some backends analyze intrinsic arguments to determine cost. Use the
956   // underlying value for the operand if it has one. Otherwise try to use the
957   // operand of the underlying call instruction, if there is one. Otherwise
958   // clear Arguments.
959   // TODO: Rework TTI interface to be independent of concrete IR values.
960   SmallVector<const Value *> Arguments;
961   for (const auto &[Idx, Op] : enumerate(operands())) {
962     auto *V = Op->getUnderlyingValue();
963     if (!V) {
964       if (auto *UI = dyn_cast_or_null<CallBase>(getUnderlyingValue())) {
965         Arguments.push_back(UI->getArgOperand(Idx));
966         continue;
967       }
968       Arguments.clear();
969       break;
970     }
971     Arguments.push_back(V);
972   }
973 
974   Type *RetTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
975   SmallVector<Type *> ParamTys;
976   for (unsigned I = 0; I != getNumOperands(); ++I)
977     ParamTys.push_back(
978         ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
979 
980   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
981   FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
982   IntrinsicCostAttributes CostAttrs(
983       VectorIntrinsicID, RetTy, Arguments, ParamTys, FMF,
984       dyn_cast_or_null<IntrinsicInst>(getUnderlyingValue()));
985   return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
986 }
987 
988 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
989 void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
990                               VPSlotTracker &SlotTracker) const {
991   O << Indent << "WIDEN-CALL ";
992 
993   Function *CalledFn = getCalledScalarFunction();
994   if (CalledFn->getReturnType()->isVoidTy())
995     O << "void ";
996   else {
997     printAsOperand(O, SlotTracker);
998     O << " = ";
999   }
1000 
1001   O << "call";
1002   printFlags(O);
1003   O << "  @" << CalledFn->getName() << "(";
1004   interleaveComma(arg_operands(), O, [&O, &SlotTracker](VPValue *Op) {
1005     Op->printAsOperand(O, SlotTracker);
1006   });
1007   O << ")";
1008 
1009   if (VectorIntrinsicID)
1010     O << " (using vector intrinsic)";
1011   else {
1012     O << " (using library function";
1013     if (Variant->hasName())
1014       O << ": " << Variant->getName();
1015     O << ")";
1016   }
1017 }
1018 #endif
1019 
1020 void VPHistogramRecipe::execute(VPTransformState &State) {
1021   State.setDebugLocFrom(getDebugLoc());
1022   IRBuilderBase &Builder = State.Builder;
1023 
1024   Value *Address = State.get(getOperand(0));
1025   Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
1026   VectorType *VTy = cast<VectorType>(Address->getType());
1027 
1028   // The histogram intrinsic requires a mask even if the recipe doesn't;
1029   // if the mask operand was omitted then all lanes should be executed and
1030   // we just need to synthesize an all-true mask.
1031   Value *Mask = nullptr;
1032   if (VPValue *VPMask = getMask())
1033     Mask = State.get(VPMask);
1034   else
1035     Mask =
1036         Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
1037 
1038   // If this is a subtract, we want to invert the increment amount. We may
1039   // add a separate intrinsic in future, but for now we'll try this.
1040   if (Opcode == Instruction::Sub)
1041     IncAmt = Builder.CreateNeg(IncAmt);
1042   else
1043     assert(Opcode == Instruction::Add && "only add or sub supported for now");
1044 
1045   State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
1046                                 {VTy, IncAmt->getType()},
1047                                 {Address, IncAmt, Mask});
1048 }
1049 
1050 InstructionCost VPHistogramRecipe::computeCost(ElementCount VF,
1051                                                VPCostContext &Ctx) const {
1052   // FIXME: Take the gather and scatter into account as well. For now we're
1053   //        generating the same cost as the fallback path, but we'll likely
1054   //        need to create a new TTI method for determining the cost, including
1055   //        whether we can use base + vec-of-smaller-indices or just
1056   //        vec-of-pointers.
1057   assert(VF.isVector() && "Invalid VF for histogram cost");
1058   Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
1059   VPValue *IncAmt = getOperand(1);
1060   Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
1061   VectorType *VTy = VectorType::get(IncTy, VF);
1062 
1063   // Assume that a non-constant update value (or a constant != 1) requires
1064   // a multiply, and add that into the cost.
1065   InstructionCost MulCost =
1066       Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy);
1067   if (IncAmt->isLiveIn()) {
1068     ConstantInt *CI = dyn_cast<ConstantInt>(IncAmt->getLiveInIRValue());
1069 
1070     if (CI && CI->getZExtValue() == 1)
1071       MulCost = TTI::TCC_Free;
1072   }
1073 
1074   // Find the cost of the histogram operation itself.
1075   Type *PtrTy = VectorType::get(AddressTy, VF);
1076   Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
1077   IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
1078                               Type::getVoidTy(Ctx.LLVMCtx),
1079                               {PtrTy, IncTy, MaskTy});
1080 
1081   // Add the costs together with the add/sub operation.
1082   return Ctx.TTI.getIntrinsicInstrCost(
1083              ICA, TargetTransformInfo::TCK_RecipThroughput) +
1084          MulCost + Ctx.TTI.getArithmeticInstrCost(Opcode, VTy);
1085 }
1086 
1087 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1088 void VPHistogramRecipe::print(raw_ostream &O, const Twine &Indent,
1089                               VPSlotTracker &SlotTracker) const {
1090   O << Indent << "WIDEN-HISTOGRAM buckets: ";
1091   getOperand(0)->printAsOperand(O, SlotTracker);
1092 
1093   if (Opcode == Instruction::Sub)
1094     O << ", dec: ";
1095   else {
1096     assert(Opcode == Instruction::Add);
1097     O << ", inc: ";
1098   }
1099   getOperand(1)->printAsOperand(O, SlotTracker);
1100 
1101   if (VPValue *Mask = getMask()) {
1102     O << ", mask: ";
1103     Mask->printAsOperand(O, SlotTracker);
1104   }
1105 }
1106 
1107 void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
1108                                 VPSlotTracker &SlotTracker) const {
1109   O << Indent << "WIDEN-SELECT ";
1110   printAsOperand(O, SlotTracker);
1111   O << " = select ";
1112   getOperand(0)->printAsOperand(O, SlotTracker);
1113   O << ", ";
1114   getOperand(1)->printAsOperand(O, SlotTracker);
1115   O << ", ";
1116   getOperand(2)->printAsOperand(O, SlotTracker);
1117   O << (isInvariantCond() ? " (condition is loop invariant)" : "");
1118 }
1119 #endif
1120 
1121 void VPWidenSelectRecipe::execute(VPTransformState &State) {
1122   State.setDebugLocFrom(getDebugLoc());
1123 
1124   // The condition can be loop invariant but still defined inside the
1125   // loop. This means that we can't just use the original 'cond' value.
1126   // We have to take the 'vectorized' value and pick the first lane.
1127   // InstCombine will make this a no-op.
1128   auto *InvarCond =
1129       isInvariantCond() ? State.get(getCond(), VPLane(0)) : nullptr;
1130 
1131   Value *Cond = InvarCond ? InvarCond : State.get(getCond());
1132   Value *Op0 = State.get(getOperand(1));
1133   Value *Op1 = State.get(getOperand(2));
1134   Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
1135   State.set(this, Sel);
1136   State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1137 }
1138 
1139 VPRecipeWithIRFlags::FastMathFlagsTy::FastMathFlagsTy(
1140     const FastMathFlags &FMF) {
1141   AllowReassoc = FMF.allowReassoc();
1142   NoNaNs = FMF.noNaNs();
1143   NoInfs = FMF.noInfs();
1144   NoSignedZeros = FMF.noSignedZeros();
1145   AllowReciprocal = FMF.allowReciprocal();
1146   AllowContract = FMF.allowContract();
1147   ApproxFunc = FMF.approxFunc();
1148 }
1149 
1150 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1151 void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
1152   switch (OpType) {
1153   case OperationType::Cmp:
1154     O << " " << CmpInst::getPredicateName(getPredicate());
1155     break;
1156   case OperationType::DisjointOp:
1157     if (DisjointFlags.IsDisjoint)
1158       O << " disjoint";
1159     break;
1160   case OperationType::PossiblyExactOp:
1161     if (ExactFlags.IsExact)
1162       O << " exact";
1163     break;
1164   case OperationType::OverflowingBinOp:
1165     if (WrapFlags.HasNUW)
1166       O << " nuw";
1167     if (WrapFlags.HasNSW)
1168       O << " nsw";
1169     break;
1170   case OperationType::FPMathOp:
1171     getFastMathFlags().print(O);
1172     break;
1173   case OperationType::GEPOp:
1174     if (GEPFlags.IsInBounds)
1175       O << " inbounds";
1176     break;
1177   case OperationType::NonNegOp:
1178     if (NonNegFlags.NonNeg)
1179       O << " nneg";
1180     break;
1181   case OperationType::Other:
1182     break;
1183   }
1184   if (getNumOperands() > 0)
1185     O << " ";
1186 }
1187 #endif
1188 
1189 void VPWidenRecipe::execute(VPTransformState &State) {
1190   State.setDebugLocFrom(getDebugLoc());
1191   auto &Builder = State.Builder;
1192   switch (Opcode) {
1193   case Instruction::Call:
1194   case Instruction::Br:
1195   case Instruction::PHI:
1196   case Instruction::GetElementPtr:
1197   case Instruction::Select:
1198     llvm_unreachable("This instruction is handled by a different recipe.");
1199   case Instruction::UDiv:
1200   case Instruction::SDiv:
1201   case Instruction::SRem:
1202   case Instruction::URem:
1203   case Instruction::Add:
1204   case Instruction::FAdd:
1205   case Instruction::Sub:
1206   case Instruction::FSub:
1207   case Instruction::FNeg:
1208   case Instruction::Mul:
1209   case Instruction::FMul:
1210   case Instruction::FDiv:
1211   case Instruction::FRem:
1212   case Instruction::Shl:
1213   case Instruction::LShr:
1214   case Instruction::AShr:
1215   case Instruction::And:
1216   case Instruction::Or:
1217   case Instruction::Xor: {
1218     // Just widen unops and binops.
1219     SmallVector<Value *, 2> Ops;
1220     for (VPValue *VPOp : operands())
1221       Ops.push_back(State.get(VPOp));
1222 
1223     Value *V = Builder.CreateNAryOp(Opcode, Ops);
1224 
1225     if (auto *VecOp = dyn_cast<Instruction>(V))
1226       setFlags(VecOp);
1227 
1228     // Use this vector value for all users of the original instruction.
1229     State.set(this, V);
1230     State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1231     break;
1232   }
1233   case Instruction::Freeze: {
1234     Value *Op = State.get(getOperand(0));
1235 
1236     Value *Freeze = Builder.CreateFreeze(Op);
1237     State.set(this, Freeze);
1238     break;
1239   }
1240   case Instruction::ICmp:
1241   case Instruction::FCmp: {
1242     // Widen compares. Generate vector compares.
1243     bool FCmp = Opcode == Instruction::FCmp;
1244     Value *A = State.get(getOperand(0));
1245     Value *B = State.get(getOperand(1));
1246     Value *C = nullptr;
1247     if (FCmp) {
1248       // Propagate fast math flags.
1249       IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1250       if (auto *I = dyn_cast_or_null<Instruction>(getUnderlyingValue()))
1251         Builder.setFastMathFlags(I->getFastMathFlags());
1252       C = Builder.CreateFCmp(getPredicate(), A, B);
1253     } else {
1254       C = Builder.CreateICmp(getPredicate(), A, B);
1255     }
1256     State.set(this, C);
1257     State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1258     break;
1259   }
1260   default:
1261     // This instruction is not vectorized by simple widening.
1262     LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode: "
1263                       << Instruction::getOpcodeName(Opcode) << "\n");
1264     llvm_unreachable("Unhandled instruction!");
1265   } // end of switch.
1266 
1267 #if !defined(NDEBUG)
1268   // Verify that VPlan type inference results agree with the type of the
1269   // generated values.
1270   assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
1271              State.get(this)->getType() &&
1272          "inferred type and type from generated instructions do not match");
1273 #endif
1274 }
1275 
1276 InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
1277                                            VPCostContext &Ctx) const {
1278   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1279   switch (Opcode) {
1280   case Instruction::FNeg: {
1281     Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
1282     return Ctx.TTI.getArithmeticInstrCost(
1283         Opcode, VectorTy, CostKind,
1284         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1285         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None});
1286   }
1287 
1288   case Instruction::UDiv:
1289   case Instruction::SDiv:
1290   case Instruction::SRem:
1291   case Instruction::URem:
1292     // More complex computation, let the legacy cost-model handle this for now.
1293     return Ctx.getLegacyCost(cast<Instruction>(getUnderlyingValue()), VF);
1294   case Instruction::Add:
1295   case Instruction::FAdd:
1296   case Instruction::Sub:
1297   case Instruction::FSub:
1298   case Instruction::Mul:
1299   case Instruction::FMul:
1300   case Instruction::FDiv:
1301   case Instruction::FRem:
1302   case Instruction::Shl:
1303   case Instruction::LShr:
1304   case Instruction::AShr:
1305   case Instruction::And:
1306   case Instruction::Or:
1307   case Instruction::Xor: {
1308     VPValue *RHS = getOperand(1);
1309     // Certain instructions can be cheaper to vectorize if they have a constant
1310     // second vector operand. One example of this is shifts on x86.
1311     TargetTransformInfo::OperandValueInfo RHSInfo = {
1312         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};
1313     if (RHS->isLiveIn())
1314       RHSInfo = Ctx.TTI.getOperandInfo(RHS->getLiveInIRValue());
1315 
1316     if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
1317         getOperand(1)->isDefinedOutsideLoopRegions())
1318       RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
1319     Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
1320     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
1321 
1322     SmallVector<const Value *, 4> Operands;
1323     if (CtxI)
1324       Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
1325     return Ctx.TTI.getArithmeticInstrCost(
1326         Opcode, VectorTy, CostKind,
1327         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1328         RHSInfo, Operands, CtxI, &Ctx.TLI);
1329   }
1330   case Instruction::Freeze: {
1331     // Assume that the cost of a freeze is the same as the cost of a 'mul'.
1332     Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(this), VF);
1333     return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
1334   }
1335   case Instruction::ICmp:
1336   case Instruction::FCmp: {
1337     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
1338     Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1339     return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
1340                                       CostKind,
1341                                       {TTI::OK_AnyValue, TTI::OP_None},
1342                                       {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
1343   }
1344   default:
1345     llvm_unreachable("Unsupported opcode for instruction");
1346   }
1347 }
1348 
1349 void VPWidenEVLRecipe::execute(VPTransformState &State) {
1350   unsigned Opcode = getOpcode();
1351   // TODO: Support other opcodes
1352   if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
1353     llvm_unreachable("Unsupported opcode in VPWidenEVLRecipe::execute");
1354 
1355   State.setDebugLocFrom(getDebugLoc());
1356 
1357   assert(State.get(getOperand(0))->getType()->isVectorTy() &&
1358          "VPWidenEVLRecipe should not be used for scalars");
1359 
1360   VPValue *EVL = getEVL();
1361   Value *EVLArg = State.get(EVL, /*NeedsScalar=*/true);
1362   IRBuilderBase &BuilderIR = State.Builder;
1363   VectorBuilder Builder(BuilderIR);
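  // VP intrinsics take an explicit mask; use an all-true mask and let the
  // explicit vector length limit the active lanes.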
1364   Value *Mask = BuilderIR.CreateVectorSplat(State.VF, BuilderIR.getTrue());
1365 
1366   SmallVector<Value *, 4> Ops;
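  // Collect all operands except the trailing EVL operand.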
1367   for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
1368     VPValue *VPOp = getOperand(I);
1369     Ops.push_back(State.get(VPOp));
1370   }
1371 
1372   Builder.setMask(Mask).setEVL(EVLArg);
1373   Value *VPInst =
1374       Builder.createVectorInstruction(Opcode, Ops[0]->getType(), Ops, "vp.op");
1375   // Currently VP intrinsics only accept fast-math flags.
1376   // TODO: Enable other flags when support is added.
1377   if (isa<FPMathOperator>(VPInst))
1378     setFlags(cast<Instruction>(VPInst));
1379 
1380   State.set(this, VPInst);
1381   State.addMetadata(VPInst,
1382                     dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1383 }
1384 
1385 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1386 void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
1387                           VPSlotTracker &SlotTracker) const {
1388   O << Indent << "WIDEN ";
1389   printAsOperand(O, SlotTracker);
1390   O << " = " << Instruction::getOpcodeName(Opcode);
1391   printFlags(O);
1392   printOperands(O, SlotTracker);
1393 }
1394 
1395 void VPWidenEVLRecipe::print(raw_ostream &O, const Twine &Indent,
1396                              VPSlotTracker &SlotTracker) const {
1397   O << Indent << "WIDEN ";
1398   printAsOperand(O, SlotTracker);
1399   O << " = vp." << Instruction::getOpcodeName(getOpcode());
1400   printFlags(O);
1401   printOperands(O, SlotTracker);
1402 }
1403 #endif
1404 
1405 void VPWidenCastRecipe::execute(VPTransformState &State) {
1406   State.setDebugLocFrom(getDebugLoc());
1407   auto &Builder = State.Builder;
1408   // Vectorize casts.
1409   assert(State.VF.isVector() && "Not vectorizing?");
1410   Type *DestTy = VectorType::get(getResultType(), State.VF);
1411   VPValue *Op = getOperand(0);
1412   Value *A = State.get(Op);
1413   Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
1414   State.set(this, Cast);
1415   State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
1416 }
1417 
1418 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1419 void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
1420                               VPSlotTracker &SlotTracker) const {
1421   O << Indent << "WIDEN-CAST ";
1422   printAsOperand(O, SlotTracker);
1423   O << " = " << Instruction::getOpcodeName(Opcode) << " ";
1424   printFlags(O);
1425   printOperands(O, SlotTracker);
1426   O << " to " << *getResultType();
1427 }
1428 #endif
1429 
1430 /// This function adds
1431 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
1432 /// to each vector element of \p Val. The sequence starts at \p StartIdx.
1433 /// \p BinOp is only relevant for FP induction variables.
1434 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
1435                             Instruction::BinaryOps BinOp, ElementCount VF,
1436                             IRBuilderBase &Builder) {
1437   assert(VF.isVector() && "only vector VFs are supported");
1438 
1439   // Create and check the types.
1440   auto *ValVTy = cast<VectorType>(Val->getType());
1441   ElementCount VLen = ValVTy->getElementCount();
1442 
1443   Type *STy = Val->getType()->getScalarType();
1444   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1445          "Induction Step must be an integer or FP");
1446   assert(Step->getType() == STy && "Step has wrong type");
1447 
1448   SmallVector<Constant *, 8> Indices;
1449 
1450   // Create a vector of consecutive numbers from zero to VF.
1451   VectorType *InitVecValVTy = ValVTy;
1452   if (STy->isFloatingPointTy()) {
1453     Type *InitVecValSTy =
1454         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
1455     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
1456   }
1457   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
1458 
1459   // Splat the StartIdx
1460   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
1461 
1462   if (STy->isIntegerTy()) {
1463     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
1464     Step = Builder.CreateVectorSplat(VLen, Step);
1465     assert(Step->getType() == Val->getType() && "Invalid step vec");
1466     // FIXME: The newly created binary instructions should contain nsw/nuw
1467     // flags, which can be found from the original scalar operations.
1468     Step = Builder.CreateMul(InitVec, Step);
1469     return Builder.CreateAdd(Val, Step, "induction");
1470   }
1471 
1472   // Floating point induction.
1473   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1474          "Binary Opcode should be specified for FP induction");
1475   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
1476   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
1477 
1478   Step = Builder.CreateVectorSplat(VLen, Step);
1479   Value *MulOp = Builder.CreateFMul(InitVec, Step);
1480   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1481 }
1482 
1483 /// A helper function that returns an integer or floating-point constant with
1484 /// value C.
1485 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
1486   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
1487                            : ConstantFP::get(Ty, C);
1488 }
1489 
1490 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
1491   assert(!State.Lane && "Int or FP induction being replicated.");
1492 
1493   Value *Start = getStartValue()->getLiveInIRValue();
1494   const InductionDescriptor &ID = getInductionDescriptor();
1495   TruncInst *Trunc = getTruncInst();
1496   IRBuilderBase &Builder = State.Builder;
1497   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1498   assert(State.VF.isVector() && "must have vector VF");
1499 
1500   // The value from the original loop to which we are mapping the new induction
1501   // variable.
1502   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1503 
1504   // Fast-math-flags propagate from the original induction instruction.
1505   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1506   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
1507     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
1508 
1509   // Now do the actual transformations, and start with fetching the step value.
1510   Value *Step = State.get(getStepValue(), VPLane(0));
1511 
1512   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1513          "Expected either an induction phi-node or a truncate of it!");
1514 
1515   // Construct the initial value of the vector IV in the vector loop preheader
1516   auto CurrIP = Builder.saveIP();
1517   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
1518   Builder.SetInsertPoint(VectorPH->getTerminator());
1519   if (isa<TruncInst>(EntryVal)) {
1520     assert(Start->getType()->isIntegerTy() &&
1521            "Truncation requires an integer type");
1522     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1523     Step = Builder.CreateTrunc(Step, TruncType);
1524     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1525   }
1526 
1527   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
1528   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
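       // SteppedStart holds the IV values for the first vector iteration:
       // <Start, Start + Step, Start + 2 * Step, ...>.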
1529   Value *SteppedStart = getStepVector(
1530       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
1531 
1532   // We create vector phi nodes for both integer and floating-point induction
1533   // variables. Here, we determine the kind of arithmetic we will perform.
1534   Instruction::BinaryOps AddOp;
1535   Instruction::BinaryOps MulOp;
1536   if (Step->getType()->isIntegerTy()) {
1537     AddOp = Instruction::Add;
1538     MulOp = Instruction::Mul;
1539   } else {
1540     AddOp = ID.getInductionOpcode();
1541     MulOp = Instruction::FMul;
1542   }
1543 
1544   Value *SplatVF;
1545   if (VPValue *SplatVFOperand = getSplatVFValue()) {
1546     // The recipe has been unrolled. In that case, fetch the splat value for the
1547     // induction increment.
1548     SplatVF = State.get(SplatVFOperand);
1549   } else {
1550     // Multiply the vectorization factor by the step using integer or
1551     // floating-point arithmetic as appropriate.
1552     Type *StepType = Step->getType();
1553     Value *RuntimeVF = State.get(getVFValue(), VPLane(0));
1554     if (Step->getType()->isFloatingPointTy())
1555       RuntimeVF = Builder.CreateUIToFP(RuntimeVF, StepType);
1556     else
1557       RuntimeVF = Builder.CreateZExtOrTrunc(RuntimeVF, StepType);
1558     Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
1559 
1560     // Create a vector splat to use in the induction update.
1561     //
1562     // FIXME: If the step is non-constant, we create the vector splat with
1563     //        IRBuilder. IRBuilder can constant-fold the multiply, but it
1564     //        doesn't handle a constant vector splat.
1565     SplatVF = isa<Constant>(Mul)
1566                   ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
1567                   : Builder.CreateVectorSplat(State.VF, Mul);
1568   }
1569 
1570   Builder.restoreIP(CurrIP);
1571 
1572   // We may need to add the step a number of times, depending on the unroll
1573   // factor. The last of those goes into the PHI.
1574   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind");
1575   VecInd->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
1576   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1577   State.set(this, VecInd);
1578 
1579   Instruction *LastInduction = cast<Instruction>(
1580       Builder.CreateBinOp(AddOp, VecInd, SplatVF, "vec.ind.next"));
1581   if (isa<TruncInst>(EntryVal))
1582     State.addMetadata(LastInduction, EntryVal);
1583   LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1584 
1585   VecInd->addIncoming(SteppedStart, VectorPH);
1586   // Add induction update using an incorrect block temporarily. The phi node
1587   // will be fixed after VPlan execution. Note that at this point the latch
1588   // block cannot be used, as it does not exist yet.
1589   // TODO: Model increment value in VPlan, by turning the recipe into a
1590   // multi-def and a subclass of VPHeaderPHIRecipe.
1591   VecInd->addIncoming(LastInduction, VectorPH);
1592 }
1593 
1594 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1595 void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
1596                                           VPSlotTracker &SlotTracker) const {
1597   O << Indent << "WIDEN-INDUCTION";
1598   if (getTruncInst()) {
1599     O << "\\l\"";
1600     O << " +\n" << Indent << "\"  " << VPlanIngredient(IV) << "\\l\"";
1601     O << " +\n" << Indent << "\"  ";
1602     getVPValue(0)->printAsOperand(O, SlotTracker);
1603   } else
1604     O << " " << VPlanIngredient(IV);
1605 
1606   O << ", ";
1607   getStepValue()->printAsOperand(O, SlotTracker);
1608 
1609   O << ", ";
1610   getVFValue()->printAsOperand(O, SlotTracker);
1611 }
1612 #endif
1613 
1614 bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
1615   // The step may be defined by a recipe in the preheader (e.g. if it requires
1616   // SCEV expansion), but for the canonical induction the step is required to be
1617   // 1, which is represented as live-in.
1618   if (getStepValue()->getDefiningRecipe())
1619     return false;
1620   auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
1621   auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
1622   auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
1623   return StartC && StartC->isZero() && StepC && StepC->isOne() &&
1624          getScalarType() == CanIV->getScalarType();
1625 }
1626 
1627 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1628 void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
1629                               VPSlotTracker &SlotTracker) const {
1630   O << Indent;
1631   printAsOperand(O, SlotTracker);
1632   O << " = DERIVED-IV ";
1633   getStartValue()->printAsOperand(O, SlotTracker);
1634   O << " + ";
1635   getOperand(1)->printAsOperand(O, SlotTracker);
1636   O << " * ";
1637   getStepValue()->printAsOperand(O, SlotTracker);
1638 }
1639 #endif
1640 
1641 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
1642   // Fast-math-flags propagate from the original induction instruction.
1643   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
1644   if (hasFastMathFlags())
1645     State.Builder.setFastMathFlags(getFastMathFlags());
1646 
1647   // Compute scalar induction steps: operand 0 provides the scalar induction
1648   // value on which to base the steps, and Step is the size of each step.
1649 
1650   Value *BaseIV = State.get(getOperand(0), VPLane(0));
1651   Value *Step = State.get(getStepValue(), VPLane(0));
1652   IRBuilderBase &Builder = State.Builder;
1653 
1654   // Ensure the step has the same type as the scalar IV.
1655   Type *BaseIVTy = BaseIV->getType()->getScalarType();
1656   assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
1657 
1658   // We build scalar steps for both integer and floating-point induction
1659   // variables. Here, we determine the kind of arithmetic we will perform.
1660   Instruction::BinaryOps AddOp;
1661   Instruction::BinaryOps MulOp;
1662   if (BaseIVTy->isIntegerTy()) {
1663     AddOp = Instruction::Add;
1664     MulOp = Instruction::Mul;
1665   } else {
1666     AddOp = InductionOpcode;
1667     MulOp = Instruction::FMul;
1668   }
1669 
1670   // Determine the number of scalars we need to generate for each unroll
1671   // iteration.
1672   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
1673   // Compute the scalar steps and save the results in State.
1674   Type *IntStepTy =
1675       IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
1676   Type *VecIVTy = nullptr;
1677   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
1678   if (!FirstLaneOnly && State.VF.isScalable()) {
1679     VecIVTy = VectorType::get(BaseIVTy, State.VF);
1680     UnitStepVec =
1681         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
1682     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
1683     SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
1684   }
1685 
1686   unsigned StartLane = 0;
1687   unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
1688   if (State.Lane) {
1689     StartLane = State.Lane->getKnownLane();
1690     EndLane = StartLane + 1;
1691   }
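       // StartIdx0 is the index of the first lane produced by this unroll part,
       // i.e. UnrollPart * VF (computed at runtime for scalable VFs).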
1692   Value *StartIdx0 =
1693       createStepForVF(Builder, IntStepTy, State.VF, getUnrollPart(*this));
1694 
1695   if (!FirstLaneOnly && State.VF.isScalable()) {
1696     auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
1697     auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
1698     if (BaseIVTy->isFloatingPointTy())
1699       InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
1700     auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
1701     auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
1702     State.set(this, Add);
1703     // It is also useful to record the per-lane values for the known minimum
1704     // number of elements, so those are generated below as well. This improves
1705     // code quality when, for example, extracting the first element.
1706   }
1707 
1708   if (BaseIVTy->isFloatingPointTy())
1709     StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
1710 
1711   for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
1712     Value *StartIdx = Builder.CreateBinOp(
1713         AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
1714     // The step returned by `createStepForVF` is a runtime-evaluated value
1715     // when VF is scalable. Otherwise, it should be folded into a Constant.
1716     assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
1717            "Expected StartIdx to be folded to a constant when VF is not "
1718            "scalable");
1719     auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
1720     auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
1721     State.set(this, Add, VPLane(Lane));
1722   }
1723 }
1724 
1725 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1726 void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
1727                                   VPSlotTracker &SlotTracker) const {
1728   O << Indent;
1729   printAsOperand(O, SlotTracker);
1730   O << " = SCALAR-STEPS ";
1731   printOperands(O, SlotTracker);
1732 }
1733 #endif
1734 
1735 void VPWidenGEPRecipe::execute(VPTransformState &State) {
1736   assert(State.VF.isVector() && "not widening");
1737   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
1738   // Construct a vector GEP by widening the operands of the scalar GEP as
1739   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
1740   // results in a vector of pointers when at least one operand of the GEP
1741   // is vector-typed. Thus, to keep the representation compact, we only use
1742   // vector-typed operands for loop-varying values.
1743 
1744   if (areAllOperandsInvariant()) {
1745     // If we are vectorizing, but the GEP has only loop-invariant operands,
1746     // the GEP we build (by only using vector-typed operands for
1747     // loop-varying values) would be a scalar pointer. Thus, to ensure we
1748     // produce a vector of pointers, we need to either arbitrarily pick an
1749     // operand to broadcast, or broadcast a clone of the original GEP.
1750     // Here, we broadcast a clone of the original.
1751     //
1752     // TODO: If at some point we decide to scalarize instructions having
1753     //       loop-invariant operands, this special case will no longer be
1754     //       required. We would add the scalarization decision to
1755     //       collectLoopScalars() and teach getVectorValue() to broadcast
1756     //       the lane-zero scalar value.
1757     SmallVector<Value *> Ops;
1758     for (unsigned I = 0, E = getNumOperands(); I != E; I++)
1759       Ops.push_back(State.get(getOperand(I), VPLane(0)));
1760 
1761     auto *NewGEP =
1762         State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
1763                                 ArrayRef(Ops).drop_front(), "", isInBounds());
1764     Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
1765     State.set(this, Splat);
1766     State.addMetadata(Splat, GEP);
1767   } else {
1768     // If the GEP has at least one loop-varying operand, we are sure to
1769     // produce a vector of pointers unless VF is scalar.
1770     // The pointer operand of the new GEP. If it's loop-invariant, we
1771     // won't broadcast it.
1772     auto *Ptr = isPointerLoopInvariant() ? State.get(getOperand(0), VPLane(0))
1773                                          : State.get(getOperand(0));
1774 
1775     // Collect all the indices for the new GEP. If any index is
1776     // loop-invariant, we won't broadcast it.
1777     SmallVector<Value *, 4> Indices;
1778     for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
1779       VPValue *Operand = getOperand(I);
1780       if (isIndexLoopInvariant(I - 1))
1781         Indices.push_back(State.get(Operand, VPLane(0)));
1782       else
1783         Indices.push_back(State.get(Operand));
1784     }
1785 
1786     // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
1787     // but otherwise it should be a vector.
1788     auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
1789                                            Indices, "", isInBounds());
1790     assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
1791            "NewGEP is not a pointer vector");
1792     State.set(this, NewGEP);
1793     State.addMetadata(NewGEP, GEP);
1794   }
1795 }
1796 
1797 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1798 void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
1799                              VPSlotTracker &SlotTracker) const {
1800   O << Indent << "WIDEN-GEP ";
1801   O << (isPointerLoopInvariant() ? "Inv" : "Var");
1802   for (size_t I = 0; I < getNumOperands() - 1; ++I)
1803     O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
1804 
1805   O << " ";
1806   printAsOperand(O, SlotTracker);
1807   O << " = getelementptr";
1808   printFlags(O);
1809   printOperands(O, SlotTracker);
1810 }
1811 #endif
1812 
1813 void VPVectorPointerRecipe::execute(VPTransformState &State) {
1814   auto &Builder = State.Builder;
1815   State.setDebugLocFrom(getDebugLoc());
1816   unsigned CurrentPart = getUnrollPart(*this);
1817   // Use i32 for the gep index type when the value is constant,
1818   // or query DataLayout for a more suitable index type otherwise.
1819   const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
1820   Type *IndexTy = State.VF.isScalable() && (IsReverse || CurrentPart > 0)
1821                       ? DL.getIndexType(Builder.getPtrTy(0))
1822                       : Builder.getInt32Ty();
1823   Value *Ptr = State.get(getOperand(0), VPLane(0));
1824   bool InBounds = isInBounds();
1825 
1826   Value *ResultPtr = nullptr;
1827   if (IsReverse) {
1828     // If the address is consecutive but reversed, then the
1829     // wide store needs to start at the last vector element.
1830     // RunTimeVF = VScale * VF.getKnownMinValue()
1831     // For fixed-width, VScale is 1, so RunTimeVF = VF.getKnownMinValue().
1832     Value *RunTimeVF = getRuntimeVF(Builder, IndexTy, State.VF);
1833     // NumElt = -CurrentPart * RunTimeVF
1834     Value *NumElt = Builder.CreateMul(
1835         ConstantInt::get(IndexTy, -(int64_t)CurrentPart), RunTimeVF);
1836     // LastLane = 1 - RunTimeVF
1837     Value *LastLane =
1838         Builder.CreateSub(ConstantInt::get(IndexTy, 1), RunTimeVF);
1839     ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", InBounds);
1840     ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "", InBounds);
1841   } else {
1842     Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
1843     ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
1844   }
1845 
1846   State.set(this, ResultPtr, /*IsScalar*/ true);
1847 }
1848 
1849 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1850 void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
1851                                   VPSlotTracker &SlotTracker) const {
1852   O << Indent;
1853   printAsOperand(O, SlotTracker);
1854   O << " = vector-pointer ";
1855   if (IsReverse)
1856     O << "(reverse) ";
1857 
1858   printOperands(O, SlotTracker);
1859 }
1860 #endif
1861 
1862 void VPBlendRecipe::execute(VPTransformState &State) {
1863   assert(isNormalized() && "Expected blend to be normalized!");
1864   State.setDebugLocFrom(getDebugLoc());
1865   // We know that all PHIs in non-header blocks are converted into
1866   // selects, so we don't have to worry about the insertion order and we
1867   // can just use the builder.
1868   // At this point we generate the predication tree. There may be
1869   // duplications since this is a simple recursive scan, but future
1870   // optimizations will clean it up.
1871 
1872   unsigned NumIncoming = getNumIncomingValues();
1873 
1874   // Generate a sequence of selects of the form:
1875   // SELECT(Mask3, In3,
1876   //        SELECT(Mask2, In2,
1877   //               SELECT(Mask1, In1,
1878   //                      In0)))
1879   // Note that Mask0 is never used: lanes for which no path reaches this phi,
1880   // and which are therefore essentially undef, take their value from In0.
1881   bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
1882   Value *Result = nullptr;
1883   for (unsigned In = 0; In < NumIncoming; ++In) {
1884     // We might have single-edge PHIs (blocks); use an identity
1885     // 'select' for the first PHI operand.
1886     Value *In0 = State.get(getIncomingValue(In), OnlyFirstLaneUsed);
1887     if (In == 0)
1888       Result = In0; // Initialize with the first incoming value.
1889     else {
1890       // Select between the current value and the previous incoming edge
1891       // based on the incoming mask.
1892       Value *Cond = State.get(getMask(In), OnlyFirstLaneUsed);
1893       Result = State.Builder.CreateSelect(Cond, In0, Result, "predphi");
1894     }
1895   }
1896   State.set(this, Result, OnlyFirstLaneUsed);
1897 }
1898 
1899 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1900 void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
1901                           VPSlotTracker &SlotTracker) const {
1902   O << Indent << "BLEND ";
1903   printAsOperand(O, SlotTracker);
1904   O << " =";
1905   if (getNumIncomingValues() == 1) {
1906     // Not a User of any mask: not really blending, this is a
1907     // single-predecessor phi.
1908     O << " ";
1909     getIncomingValue(0)->printAsOperand(O, SlotTracker);
1910   } else {
1911     for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
1912       O << " ";
1913       getIncomingValue(I)->printAsOperand(O, SlotTracker);
1914       if (I == 0)
1915         continue;
1916       O << "/";
1917       getMask(I)->printAsOperand(O, SlotTracker);
1918     }
1919   }
1920 }
1921 #endif
1922 
1923 void VPReductionRecipe::execute(VPTransformState &State) {
1924   assert(!State.Lane && "Reduction being replicated.");
1925   Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
1926   RecurKind Kind = RdxDesc.getRecurrenceKind();
1927   // Propagate the fast-math flags carried by the underlying instruction.
1928   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
1929   State.Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1930   Value *NewVecOp = State.get(getVecOp());
1931   if (VPValue *Cond = getCondOp()) {
1932     Value *NewCond = State.get(Cond, State.VF.isScalar());
1933     VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
1934     Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
1935 
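         // Masked-off lanes must not contribute to the reduction: select the
         // neutral (identity) value for them, or the recurrence start value for
         // AnyOf reductions.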
1936     Value *Start;
1937     if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind))
1938       Start = RdxDesc.getRecurrenceStartValue();
1939     else
1940       Start = llvm::getRecurrenceIdentity(Kind, ElementTy,
1941                                           RdxDesc.getFastMathFlags());
1942     if (State.VF.isVector())
1943       Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
1944 
1945     Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
1946     NewVecOp = Select;
1947   }
1948   Value *NewRed;
1949   Value *NextInChain;
1950   if (IsOrdered) {
1951     if (State.VF.isVector())
1952       NewRed =
1953           createOrderedReduction(State.Builder, RdxDesc, NewVecOp, PrevInChain);
1954     else
1955       NewRed = State.Builder.CreateBinOp(
1956           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), PrevInChain,
1957           NewVecOp);
1958     PrevInChain = NewRed;
1959     NextInChain = NewRed;
1960   } else {
1961     PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
1962     NewRed = createReduction(State.Builder, RdxDesc, NewVecOp);
1963     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
1964       NextInChain = createMinMaxOp(State.Builder, RdxDesc.getRecurrenceKind(),
1965                                    NewRed, PrevInChain);
1966     else
1967       NextInChain = State.Builder.CreateBinOp(
1968           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, PrevInChain);
1969   }
1970   State.set(this, NextInChain, /*IsScalar*/ true);
1971 }
1972 
1973 void VPReductionEVLRecipe::execute(VPTransformState &State) {
1974   assert(!State.Lane && "Reduction being replicated.");
1975 
1976   auto &Builder = State.Builder;
1977   // Propagate the fast-math flags carried by the underlying instruction.
1978   IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
1979   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
1980   Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1981 
1982   RecurKind Kind = RdxDesc.getRecurrenceKind();
1983   Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
1984   Value *VecOp = State.get(getVecOp());
1985   Value *EVL = State.get(getEVL(), VPLane(0));
1986 
1987   VectorBuilder VBuilder(Builder);
1988   VBuilder.setEVL(EVL);
1989   Value *Mask;
1990   // TODO: move the all-true mask generation into VectorBuilder.
1991   if (VPValue *CondOp = getCondOp())
1992     Mask = State.get(CondOp);
1993   else
1994     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
1995   VBuilder.setMask(Mask);
1996 
1997   Value *NewRed;
1998   if (isOrdered()) {
1999     NewRed = createOrderedReduction(VBuilder, RdxDesc, VecOp, Prev);
2000   } else {
2001     NewRed = createSimpleReduction(VBuilder, VecOp, RdxDesc);
2002     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2003       NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2004     else
2005       NewRed = Builder.CreateBinOp(
2006           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, Prev);
2007   }
2008   State.set(this, NewRed, /*IsScalar*/ true);
2009 }
2010 
2011 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2012 void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
2013                               VPSlotTracker &SlotTracker) const {
2014   O << Indent << "REDUCE ";
2015   printAsOperand(O, SlotTracker);
2016   O << " = ";
2017   getChainOp()->printAsOperand(O, SlotTracker);
2018   O << " +";
2019   if (isa<FPMathOperator>(getUnderlyingInstr()))
2020     O << getUnderlyingInstr()->getFastMathFlags();
2021   O << " reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
2022   getVecOp()->printAsOperand(O, SlotTracker);
2023   if (isConditional()) {
2024     O << ", ";
2025     getCondOp()->printAsOperand(O, SlotTracker);
2026   }
2027   O << ")";
2028   if (RdxDesc.IntermediateStore)
2029     O << " (with final reduction value stored in invariant address sunk "
2030          "outside of loop)";
2031 }
2032 
2033 void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2034                                  VPSlotTracker &SlotTracker) const {
2035   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
2036   O << Indent << "REDUCE ";
2037   printAsOperand(O, SlotTracker);
2038   O << " = ";
2039   getChainOp()->printAsOperand(O, SlotTracker);
2040   O << " +";
2041   if (isa<FPMathOperator>(getUnderlyingInstr()))
2042     O << getUnderlyingInstr()->getFastMathFlags();
2043   O << " vp.reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
2044   getVecOp()->printAsOperand(O, SlotTracker);
2045   O << ", ";
2046   getEVL()->printAsOperand(O, SlotTracker);
2047   if (isConditional()) {
2048     O << ", ";
2049     getCondOp()->printAsOperand(O, SlotTracker);
2050   }
2051   O << ")";
2052   if (RdxDesc.IntermediateStore)
2053     O << " (with final reduction value stored in invariant address sunk "
2054          "outside of loop)";
2055 }
2056 #endif
2057 
2058 bool VPReplicateRecipe::shouldPack() const {
2059   // Find if the recipe is used by a widened recipe via an intervening
2060   // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
2061   return any_of(users(), [](const VPUser *U) {
2062     if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
2063       return any_of(PredR->users(), [PredR](const VPUser *U) {
2064         return !U->usesScalars(PredR);
2065       });
2066     return false;
2067   });
2068 }
2069 
2070 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2071 void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
2072                               VPSlotTracker &SlotTracker) const {
2073   O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");
2074 
2075   if (!getUnderlyingInstr()->getType()->isVoidTy()) {
2076     printAsOperand(O, SlotTracker);
2077     O << " = ";
2078   }
2079   if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
2080     O << "call";
2081     printFlags(O);
2082     O << "@" << CB->getCalledFunction()->getName() << "(";
2083     interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
2084                     O, [&O, &SlotTracker](VPValue *Op) {
2085                       Op->printAsOperand(O, SlotTracker);
2086                     });
2087     O << ")";
2088   } else {
2089     O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
2090     printFlags(O);
2091     printOperands(O, SlotTracker);
2092   }
2093 
2094   if (shouldPack())
2095     O << " (S->V)";
2096 }
2097 #endif
2098 
2099 Value *VPScalarCastRecipe::generate(VPTransformState &State) {
2100   assert(vputils::onlyFirstLaneUsed(this) &&
2101          "Codegen only implemented for first lane.");
2102   switch (Opcode) {
2103   case Instruction::SExt:
2104   case Instruction::ZExt:
2105   case Instruction::Trunc: {
2106     // Note: SExt/ZExt not used yet.
2107     Value *Op = State.get(getOperand(0), VPLane(0));
2108     return State.Builder.CreateCast(Instruction::CastOps(Opcode), Op, ResultTy);
2109   }
2110   default:
2111     llvm_unreachable("opcode not implemented yet");
2112   }
2113 }
2114 
2115 void VPScalarCastRecipe::execute(VPTransformState &State) {
2116   State.set(this, generate(State), VPLane(0));
2117 }
2118 
2119 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2120 void VPScalarCastRecipe::print(raw_ostream &O, const Twine &Indent,
2121                                VPSlotTracker &SlotTracker) const {
2122   O << Indent << "SCALAR-CAST ";
2123   printAsOperand(O, SlotTracker);
2124   O << " = " << Instruction::getOpcodeName(Opcode) << " ";
2125   printOperands(O, SlotTracker);
2126   O << " to " << *ResultTy;
2127 }
2128 #endif
2129 
2130 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
2131   assert(State.Lane && "Branch on Mask works only on single instance.");
2132 
2133   unsigned Lane = State.Lane->getKnownLane();
2134 
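       // Extract this lane's bit from the block mask, if any; without a mask the
       // block is executed unconditionally for this lane.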
2135   Value *ConditionBit = nullptr;
2136   VPValue *BlockInMask = getMask();
2137   if (BlockInMask) {
2138     ConditionBit = State.get(BlockInMask);
2139     if (ConditionBit->getType()->isVectorTy())
2140       ConditionBit = State.Builder.CreateExtractElement(
2141           ConditionBit, State.Builder.getInt32(Lane));
2142   } else // Block in mask is all-one.
2143     ConditionBit = State.Builder.getTrue();
2144 
2145   // Replace the temporary unreachable terminator with a new conditional branch,
2146   // whose two destinations will be set later when they are created.
2147   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
2148   assert(isa<UnreachableInst>(CurrentTerminator) &&
2149          "Expected to replace unreachable terminator with conditional branch.");
2150   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
2151   CondBr->setSuccessor(0, nullptr);
2152   ReplaceInstWithInst(CurrentTerminator, CondBr);
2153 }
2154 
2155 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
2156   assert(State.Lane && "Predicated instruction PHI works per instance.");
2157   Instruction *ScalarPredInst =
2158       cast<Instruction>(State.get(getOperand(0), *State.Lane));
2159   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
2160   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
2161   assert(PredicatingBB && "Predicated block has no single predecessor.");
2162   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
2163          "operand must be VPReplicateRecipe");
2164 
2165   // By current pack/unpack logic we need to generate only a single phi node: if
2166   // a vector value for the predicated instruction exists at this point it means
2167   // the instruction has vector users only, and a phi for the vector value is
2168   // needed. In this case the recipe of the predicated instruction is marked to
2169   // also do that packing, thereby "hoisting" the insert-element sequence.
2170   // Otherwise, a phi node for the scalar value is needed.
2171   if (State.hasVectorValue(getOperand(0))) {
2172     Value *VectorValue = State.get(getOperand(0));
2173     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
2174     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
2175     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
2176     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
2177     if (State.hasVectorValue(this))
2178       State.reset(this, VPhi);
2179     else
2180       State.set(this, VPhi);
2181     // NOTE: Currently we need to update the value of the operand, so the next
2182     // predicated iteration inserts its generated value in the correct vector.
2183     State.reset(getOperand(0), VPhi);
2184   } else {
2185     if (vputils::onlyFirstLaneUsed(this) && !State.Lane->isFirstLane())
2186       return;
2187 
2188     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
2189     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
2190     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
2191                      PredicatingBB);
2192     Phi->addIncoming(ScalarPredInst, PredicatedBB);
2193     if (State.hasScalarValue(this, *State.Lane))
2194       State.reset(this, Phi, *State.Lane);
2195     else
2196       State.set(this, Phi, *State.Lane);
2197     // NOTE: Currently we need to update the value of the operand, so the next
2198     // predicated iteration inserts its generated value in the correct vector.
2199     State.reset(getOperand(0), Phi, *State.Lane);
2200   }
2201 }
2202 
2203 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2204 void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2205                                 VPSlotTracker &SlotTracker) const {
2206   O << Indent << "PHI-PREDICATED-INSTRUCTION ";
2207   printAsOperand(O, SlotTracker);
2208   O << " = ";
2209   printOperands(O, SlotTracker);
2210 }
2211 #endif
2212 
2213 InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
2214                                                  VPCostContext &Ctx) const {
2215   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2216   const Align Alignment =
2217       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2218   unsigned AS =
2219       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2220   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2221 
2222   if (!Consecutive) {
2223     // TODO: Using the original IR may not be accurate.
2224     // Currently, ARM will use the underlying IR to calculate gather/scatter
2225     // instruction cost.
2226     const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
2227     assert(!Reverse &&
2228            "Inconsecutive memory accesses should not be marked as reversed.");
2229     return Ctx.TTI.getAddressComputationCost(Ty) +
2230            Ctx.TTI.getGatherScatterOpCost(Ingredient.getOpcode(), Ty, Ptr,
2231                                           IsMasked, Alignment, CostKind,
2232                                           &Ingredient);
2233   }
2234 
2235   InstructionCost Cost = 0;
2236   if (IsMasked) {
2237     Cost += Ctx.TTI.getMaskedMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment,
2238                                           AS, CostKind);
2239   } else {
2240     TTI::OperandValueInfo OpInfo =
2241         Ctx.TTI.getOperandInfo(Ingredient.getOperand(0));
2242     Cost += Ctx.TTI.getMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment, AS,
2243                                     CostKind, OpInfo, &Ingredient);
2244   }
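       // Reverse accesses additionally pay for a vector reverse shuffle.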
2245   if (!Reverse)
2246     return Cost;
2247 
2248   return Cost + Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2249                                        cast<VectorType>(Ty), {}, CostKind, 0);
2250 }
2251 
2252 void VPWidenLoadRecipe::execute(VPTransformState &State) {
2253   auto *LI = cast<LoadInst>(&Ingredient);
2254 
2255   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2256   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2257   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2258   bool CreateGather = !isConsecutive();
2259 
2260   auto &Builder = State.Builder;
2261   State.setDebugLocFrom(getDebugLoc());
2262   Value *Mask = nullptr;
2263   if (auto *VPMask = getMask()) {
2264     // Mask reversal is only needed for non-null masks: a null mask stands for
2265     // an all-one mask, and the reverse of an all-one mask is still all-one.
2266     Mask = State.get(VPMask);
2267     if (isReverse())
2268       Mask = Builder.CreateVectorReverse(Mask, "reverse");
2269   }
2270 
2271   Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
2272   Value *NewLI;
2273   if (CreateGather) {
2274     NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
2275                                        "wide.masked.gather");
2276   } else if (Mask) {
2277     NewLI =
2278         Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
2279                                  PoisonValue::get(DataTy), "wide.masked.load");
2280   } else {
2281     NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
2282   }
2283   // Add metadata to the load; the recipe's result may be the reverse shuffle.
2284   State.addMetadata(NewLI, LI);
2285   if (Reverse)
2286     NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
2287   State.set(this, NewLI);
2288 }
2289 
2290 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2291 void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
2292                               VPSlotTracker &SlotTracker) const {
2293   O << Indent << "WIDEN ";
2294   printAsOperand(O, SlotTracker);
2295   O << " = load ";
2296   printOperands(O, SlotTracker);
2297 }
2298 #endif
2299 
2300 /// Use an all-true mask for the reverse rather than the actual mask, as this
2301 /// avoids a dependence without affecting the result.
2302 static Instruction *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
2303                                      Value *EVL, const Twine &Name) {
2304   VectorType *ValTy = cast<VectorType>(Operand->getType());
2305   Value *AllTrueMask =
2306       Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
2307   return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
2308                                  {Operand, AllTrueMask, EVL}, nullptr, Name);
2309 }
2310 
2311 void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
2312   auto *LI = cast<LoadInst>(&Ingredient);
2313 
2314   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2315   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2316   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2317   bool CreateGather = !isConsecutive();
2318 
2319   auto &Builder = State.Builder;
2320   State.setDebugLocFrom(getDebugLoc());
2321   CallInst *NewLI;
2322   Value *EVL = State.get(getEVL(), VPLane(0));
2323   Value *Addr = State.get(getAddr(), !CreateGather);
2324   Value *Mask = nullptr;
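       // Use the recipe's mask if present (reversed for reverse accesses);
       // otherwise splat an all-true mask, as the EVL already bounds the active
       // lanes.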
2325   if (VPValue *VPMask = getMask()) {
2326     Mask = State.get(VPMask);
2327     if (isReverse())
2328       Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
2329   } else {
2330     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2331   }
2332 
2333   if (CreateGather) {
2334     NewLI =
2335         Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
2336                                 nullptr, "wide.masked.gather");
2337   } else {
2338     VectorBuilder VBuilder(Builder);
2339     VBuilder.setEVL(EVL).setMask(Mask);
2340     NewLI = cast<CallInst>(VBuilder.createVectorInstruction(
2341         Instruction::Load, DataTy, Addr, "vp.op.load"));
2342   }
2343   NewLI->addParamAttr(
2344       0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
2345   State.addMetadata(NewLI, LI);
2346   Instruction *Res = NewLI;
2347   if (isReverse())
2348     Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
2349   State.set(this, Res);
2350 }
2351 
2352 InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
2353                                                   VPCostContext &Ctx) const {
2354   if (!Consecutive || IsMasked)
2355     return VPWidenMemoryRecipe::computeCost(VF, Ctx);
2356 
2357   // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost() here
2358   // because the EVL recipes use EVL to replace the tail mask, while the legacy
2359   // model always accounts for the cost of the mask.
2360   // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() once we no
2361   // longer need to compare against the legacy cost model.
2362   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2363   const Align Alignment =
2364       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2365   unsigned AS =
2366       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2367   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2368   InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
2369       Ingredient.getOpcode(), Ty, Alignment, AS, CostKind);
2370   if (!Reverse)
2371     return Cost;
2372 
2373   return Cost + Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2374                                        cast<VectorType>(Ty), {}, CostKind, 0);
2375 }
2376 
2377 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2378 void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2379                                  VPSlotTracker &SlotTracker) const {
2380   O << Indent << "WIDEN ";
2381   printAsOperand(O, SlotTracker);
2382   O << " = vp.load ";
2383   printOperands(O, SlotTracker);
2384 }
2385 #endif
2386 
2387 void VPWidenStoreRecipe::execute(VPTransformState &State) {
2388   auto *SI = cast<StoreInst>(&Ingredient);
2389 
2390   VPValue *StoredVPValue = getStoredValue();
2391   bool CreateScatter = !isConsecutive();
2392   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2393 
2394   auto &Builder = State.Builder;
2395   State.setDebugLocFrom(getDebugLoc());
2396 
2397   Value *Mask = nullptr;
2398   if (auto *VPMask = getMask()) {
2399     // Mask reversal is only needed for non-null masks: a null mask stands for
2400     // an all-one mask, and the reverse of an all-one mask is still all-one.
2401     Mask = State.get(VPMask);
2402     if (isReverse())
2403       Mask = Builder.CreateVectorReverse(Mask, "reverse");
2404   }
2405 
2406   Value *StoredVal = State.get(StoredVPValue);
2407   if (isReverse()) {
2408     // If we store to reverse consecutive memory locations, then we need
2409     // to reverse the order of elements in the stored value.
2410     StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
2411     // We don't want to update the value in the map as it might be used in
2412     // another expression. So don't call resetVectorValue(StoredVal).
2413   }
2414   Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
2415   Instruction *NewSI = nullptr;
2416   if (CreateScatter)
2417     NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
2418   else if (Mask)
2419     NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
2420   else
2421     NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
2422   State.addMetadata(NewSI, SI);
2423 }
2424 
2425 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2426 void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
2427                                VPSlotTracker &SlotTracker) const {
2428   O << Indent << "WIDEN store ";
2429   printOperands(O, SlotTracker);
2430 }
2431 #endif
2432 
2433 void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
2434   auto *SI = cast<StoreInst>(&Ingredient);
2435 
2436   VPValue *StoredValue = getStoredValue();
2437   bool CreateScatter = !isConsecutive();
2438   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2439 
2440   auto &Builder = State.Builder;
2441   State.setDebugLocFrom(getDebugLoc());
2442 
2443   CallInst *NewSI = nullptr;
2444   Value *StoredVal = State.get(StoredValue);
2445   Value *EVL = State.get(getEVL(), VPLane(0));
2446   if (isReverse())
2447     StoredVal = createReverseEVL(Builder, StoredVal, EVL, "vp.reverse");
2448   Value *Mask = nullptr;
2449   if (VPValue *VPMask = getMask()) {
2450     Mask = State.get(VPMask);
2451     if (isReverse())
2452       Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
2453   } else {
2454     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2455   }
2456   Value *Addr = State.get(getAddr(), !CreateScatter);
2457   if (CreateScatter) {
2458     NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
2459                                     Intrinsic::vp_scatter,
2460                                     {StoredVal, Addr, Mask, EVL});
2461   } else {
2462     VectorBuilder VBuilder(Builder);
2463     VBuilder.setEVL(EVL).setMask(Mask);
2464     NewSI = cast<CallInst>(VBuilder.createVectorInstruction(
2465         Instruction::Store, Type::getVoidTy(EVL->getContext()),
2466         {StoredVal, Addr}));
2467   }
2468   NewSI->addParamAttr(
2469       1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
2470   State.addMetadata(NewSI, SI);
2471 }
2472 
2473 InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
2474                                                    VPCostContext &Ctx) const {
2475   if (!Consecutive || IsMasked)
2476     return VPWidenMemoryRecipe::computeCost(VF, Ctx);
2477 
2478   // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost() here
2479   // because the EVL recipes use EVL to replace the tail mask, while the legacy
2480   // model always accounts for the cost of the mask.
2481   // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() once we no
2482   // longer need to compare against the legacy cost model.
2483   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2484   const Align Alignment =
2485       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2486   unsigned AS =
2487       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2488   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2489   InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
2490       Ingredient.getOpcode(), Ty, Alignment, AS, CostKind);
2491   if (!Reverse)
2492     return Cost;
2493 
2494   return Cost + Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2495                                        cast<VectorType>(Ty), {}, CostKind, 0);
2496 }
2497 
2498 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2499 void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2500                                   VPSlotTracker &SlotTracker) const {
2501   O << Indent << "WIDEN vp.store ";
2502   printOperands(O, SlotTracker);
2503 }
2504 #endif
2505 
2506 static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
2507                                      VectorType *DstVTy, const DataLayout &DL) {
2508   // Verify that V is a vector type with same number of elements as DstVTy.
2509   auto VF = DstVTy->getElementCount();
2510   auto *SrcVecTy = cast<VectorType>(V->getType());
2511   assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
2512   Type *SrcElemTy = SrcVecTy->getElementType();
2513   Type *DstElemTy = DstVTy->getElementType();
2514   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2515          "Vector elements must have same size");
2516 
2517   // Do a direct cast if element types are castable.
2518   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2519     return Builder.CreateBitOrPointerCast(V, DstVTy);
2520   }
2521   // V cannot be directly casted to desired vector type.
2522   // May happen when V is a floating point vector but DstVTy is a vector of
2523   // pointers or vice-versa. Handle this using a two-step bitcast using an
2524   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
2525   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2526          "Only one type should be a pointer type");
2527   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2528          "Only one type should be a floating point type");
2529   Type *IntTy =
2530       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2531   auto *VecIntTy = VectorType::get(IntTy, VF);
2532   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2533   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2534 }
2535 
2536 /// Return a vector containing interleaved elements from multiple
2537 /// smaller input vectors.
2538 static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
2539                                 const Twine &Name) {
2540   unsigned Factor = Vals.size();
2541   assert(Factor > 1 && "Tried to interleave invalid number of vectors");
2542 
2543   VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
2544 #ifndef NDEBUG
2545   for (Value *Val : Vals)
2546     assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
2547 #endif
2548 
2549   // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
2550   // must use intrinsics to interleave.
2551   if (VecTy->isScalableTy()) {
2552     VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
2553     return Builder.CreateIntrinsic(WideVecTy, Intrinsic::vector_interleave2,
2554                                    Vals,
2555                                    /*FMFSource=*/nullptr, Name);
2556   }
2557 
2558   // Fixed length. Start by concatenating all vectors into a wide vector.
2559   Value *WideVec = concatenateVectors(Builder, Vals);
2560 
2561   // Interleave the elements into the wide vector.
2562   const unsigned NumElts = VecTy->getElementCount().getFixedValue();
2563   return Builder.CreateShuffleVector(
2564       WideVec, createInterleaveMask(NumElts, Factor), Name);
2565 }
2566 
2567 // Try to vectorize the interleave group that \p Instr belongs to.
2568 //
2569 // E.g. Translate following interleaved load group (factor = 3):
2570 //   for (i = 0; i < N; i+=3) {
2571 //     R = Pic[i];             // Member of index 0
2572 //     G = Pic[i+1];           // Member of index 1
2573 //     B = Pic[i+2];           // Member of index 2
2574 //     ... // do something to R, G, B
2575 //   }
2576 // To:
2577 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2578 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2579 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2580 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2581 //
2582 // Or translate following interleaved store group (factor = 3):
2583 //   for (i = 0; i < N; i+=3) {
2584 //     ... do something to R, G, B
2585 //     Pic[i]   = R;           // Member of index 0
2586 //     Pic[i+1] = G;           // Member of index 1
2587 //     Pic[i+2] = B;           // Member of index 2
2588 //   }
2589 // To:
2590 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2591 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2592 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2593 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2594 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2595 void VPInterleaveRecipe::execute(VPTransformState &State) {
2596   assert(!State.Lane && "Interleave group being replicated.");
2597   const InterleaveGroup<Instruction> *Group = IG;
2598   Instruction *Instr = Group->getInsertPos();
2599 
2600   // Prepare for the vector type of the interleaved load/store.
2601   Type *ScalarTy = getLoadStoreType(Instr);
2602   unsigned InterleaveFactor = Group->getFactor();
2603   auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
2604 
2605   // TODO: extend the masked interleaved-group support to reversed access.
2606   VPValue *BlockInMask = getMask();
2607   assert((!BlockInMask || !Group->isReverse()) &&
2608          "Reversed masked interleave-group not supported.");
2609 
2610   VPValue *Addr = getAddr();
2611   Value *ResAddr = State.get(Addr, VPLane(0));
2612   if (auto *I = dyn_cast<Instruction>(ResAddr))
2613     State.setDebugLocFrom(I->getDebugLoc());
2614 
2615   // If the group is reverse, adjust the index to refer to the last vector lane
2616   // instead of the first. We adjust the index from the first vector lane,
2617   // rather than directly getting the pointer for lane VF - 1, because the
2618   // pointer operand of the interleaved access is supposed to be uniform.
2619   if (Group->isReverse()) {
2620     Value *RuntimeVF =
2621         getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
2622     Value *Index =
2623         State.Builder.CreateSub(RuntimeVF, State.Builder.getInt32(1));
2624     Index = State.Builder.CreateMul(Index,
2625                                     State.Builder.getInt32(Group->getFactor()));
2626     Index = State.Builder.CreateNeg(Index);
2627 
2628     bool InBounds = false;
2629     if (auto *Gep = dyn_cast<GetElementPtrInst>(ResAddr->stripPointerCasts()))
2630       InBounds = Gep->isInBounds();
2631     ResAddr = State.Builder.CreateGEP(ScalarTy, ResAddr, Index, "", InBounds);
2632   }
2633 
2634   State.setDebugLocFrom(Instr->getDebugLoc());
2635   Value *PoisonVec = PoisonValue::get(VecTy);
2636 
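       // Build the mask for the whole interleave group: for scalable vectors the
       // block mask is interleaved with itself (factor 2 only); for fixed-width
       // vectors each block-mask lane is replicated Factor times and, if needed,
       // combined with the mask for gaps.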
2637   auto CreateGroupMask = [&BlockInMask, &State,
2638                           &InterleaveFactor](Value *MaskForGaps) -> Value * {
2639     if (State.VF.isScalable()) {
2640       assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
2641       assert(InterleaveFactor == 2 &&
2642              "Unsupported deinterleave factor for scalable vectors");
2643       auto *ResBlockInMask = State.get(BlockInMask);
2644       SmallVector<Value *, 2> Ops = {ResBlockInMask, ResBlockInMask};
2645       auto *MaskTy = VectorType::get(State.Builder.getInt1Ty(),
2646                                      State.VF.getKnownMinValue() * 2, true);
2647       return State.Builder.CreateIntrinsic(
2648           MaskTy, Intrinsic::vector_interleave2, Ops,
2649           /*FMFSource=*/nullptr, "interleaved.mask");
2650     }
2651 
2652     if (!BlockInMask)
2653       return MaskForGaps;
2654 
2655     Value *ResBlockInMask = State.get(BlockInMask);
2656     Value *ShuffledMask = State.Builder.CreateShuffleVector(
2657         ResBlockInMask,
2658         createReplicatedMask(InterleaveFactor, State.VF.getKnownMinValue()),
2659         "interleaved.mask");
2660     return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
2661                                                    ShuffledMask, MaskForGaps)
2662                        : ShuffledMask;
2663   };
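  // E.g. (illustrative): for VF = 4 and factor 3, the fixed-width path of
  // CreateGroupMask above replicates each block-mask bit three times via the
  // shuffle mask <0,0,0,1,1,1,2,2,2,3,3,3>; the scalable path instead
  // interleaves the mask with itself using llvm.vector.interleave2 (factor 2
  // only).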
2664 
2665   const DataLayout &DL = Instr->getDataLayout();
2666   // Vectorize the interleaved load group.
2667   if (isa<LoadInst>(Instr)) {
2668     Value *MaskForGaps = nullptr;
2669     if (NeedsMaskForGaps) {
2670       MaskForGaps = createBitMaskForGaps(State.Builder,
2671                                          State.VF.getKnownMinValue(), *Group);
2672       assert(MaskForGaps && "Mask for Gaps is required but it is null");
2673     }
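    // E.g. (sketch): for a factor-3 group with a gap at member index 1 and
    // VF = 4, MaskForGaps is roughly <1,0,1, 1,0,1, 1,0,1, 1,0,1>, disabling
    // the lanes that would touch the missing member.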
2674 
2675     Instruction *NewLoad;
2676     if (BlockInMask || MaskForGaps) {
2677       Value *GroupMask = CreateGroupMask(MaskForGaps);
2678       NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
2679                                                Group->getAlign(), GroupMask,
2680                                                PoisonVec, "wide.masked.vec");
2681     } else
2682       NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
2683                                                 Group->getAlign(), "wide.vec");
2684     Group->addMetadata(NewLoad);
2685 
2686     ArrayRef<VPValue *> VPDefs = definedValues();
2687     const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2688     if (VecTy->isScalableTy()) {
2689       assert(InterleaveFactor == 2 &&
2690              "Unsupported deinterleave factor for scalable vectors");
2691 
2692       // Scalable vectors cannot use arbitrary shufflevectors (only splats),
2693       // so must use intrinsics to deinterleave.
2694       Value *DI = State.Builder.CreateIntrinsic(
2695           Intrinsic::vector_deinterleave2, VecTy, NewLoad,
2696           /*FMFSource=*/nullptr, "strided.vec");
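      // E.g. (sketch): assuming <vscale x 4 x i32> members, the call above
      // produces roughly:
      //   %strided.vec = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
      //       @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %wide.vec)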
2697       unsigned J = 0;
2698       for (unsigned I = 0; I < InterleaveFactor; ++I) {
2699         Instruction *Member = Group->getMember(I);
2700 
2701         if (!Member)
2702           continue;
2703 
2704         Value *StridedVec = State.Builder.CreateExtractValue(DI, I);
2705         // If this member has a different type, cast the result type.
2706         if (Member->getType() != ScalarTy) {
2707           VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2708           StridedVec =
2709               createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2710         }
2711 
2712         if (Group->isReverse())
2713           StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2714 
2715         State.set(VPDefs[J], StridedVec);
2716         ++J;
2717       }
2718 
2719       return;
2720     }
2721 
2722     // For each member in the group, shuffle out the appropriate data from the
2723     // wide loads.
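    // E.g. (illustrative): for member index 1 of a factor-3 group with VF = 4,
    // the stride mask below is <1, 4, 7, 10>.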
2724     unsigned J = 0;
2725     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2726       Instruction *Member = Group->getMember(I);
2727 
2728       // Skip the gaps in the group.
2729       if (!Member)
2730         continue;
2731 
2732       auto StrideMask =
2733           createStrideMask(I, InterleaveFactor, State.VF.getKnownMinValue());
2734       Value *StridedVec =
2735           State.Builder.CreateShuffleVector(NewLoad, StrideMask, "strided.vec");
2736 
2737       // If this member has different type, cast the result type.
2738       if (Member->getType() != ScalarTy) {
2739         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
2740         VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2741         StridedVec =
2742             createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2743       }
2744 
2745       if (Group->isReverse())
2746         StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2747 
2748       State.set(VPDefs[J], StridedVec);
2749       ++J;
2750     }
2751     return;
2752   }
2753 
2754   // The subvector type for the current instruction.
2755   auto *SubVT = VectorType::get(ScalarTy, State.VF);
2756 
2757   // Vectorize the interleaved store group.
2758   Value *MaskForGaps =
2759       createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
2760   assert((!MaskForGaps || !State.VF.isScalable()) &&
2761          "masking gaps for scalable vectors is not yet supported.");
2762   ArrayRef<VPValue *> StoredValues = getStoredValues();
2763   // Collect the stored vector from each member.
2764   SmallVector<Value *, 4> StoredVecs;
2765   unsigned StoredIdx = 0;
2766   for (unsigned i = 0; i < InterleaveFactor; i++) {
2767     assert((Group->getMember(i) || MaskForGaps) &&
2768            "Fail to get a member from an interleaved store group");
2769     Instruction *Member = Group->getMember(i);
2770 
2771     // Skip the gaps in the group.
2772     if (!Member) {
2773       Value *Poison = PoisonValue::get(SubVT);
2774       StoredVecs.push_back(Poison);
2775       continue;
2776     }
2777 
2778     Value *StoredVec = State.get(StoredValues[StoredIdx]);
2779     ++StoredIdx;
2780 
2781     if (Group->isReverse())
2782       StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
2783 
2784     // If this member has a different type, cast it to a unified type.
2786     if (StoredVec->getType() != SubVT)
2787       StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
2788 
2789     StoredVecs.push_back(StoredVec);
2790   }
2791 
2792   // Interleave all the smaller vectors into one wider vector.
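  // E.g. (sketch): for fixed VF the call below emits the concatenate-and-shuffle
  // sequence shown in the example at the top of this function; for scalable
  // vectors an interleave intrinsic is emitted instead.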
2793   Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
2794   Instruction *NewStoreInstr;
2795   if (BlockInMask || MaskForGaps) {
2796     Value *GroupMask = CreateGroupMask(MaskForGaps);
2797     NewStoreInstr = State.Builder.CreateMaskedStore(
2798         IVec, ResAddr, Group->getAlign(), GroupMask);
2799   } else
2800     NewStoreInstr =
2801         State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
2802 
2803   Group->addMetadata(NewStoreInstr);
2804 }
2805 
2806 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2807 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
2808                                VPSlotTracker &SlotTracker) const {
2809   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
2810   IG->getInsertPos()->printAsOperand(O, false);
2811   O << ", ";
2812   getAddr()->printAsOperand(O, SlotTracker);
2813   VPValue *Mask = getMask();
2814   if (Mask) {
2815     O << ", ";
2816     Mask->printAsOperand(O, SlotTracker);
2817   }
2818 
2819   unsigned OpIdx = 0;
2820   for (unsigned i = 0; i < IG->getFactor(); ++i) {
2821     if (!IG->getMember(i))
2822       continue;
2823     if (getNumStoreOperands() > 0) {
2824       O << "\n" << Indent << "  store ";
2825       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
2826       O << " to index " << i;
2827     } else {
2828       O << "\n" << Indent << "  ";
2829       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
2830       O << " = load from index " << i;
2831     }
2832     ++OpIdx;
2833   }
2834 }
2835 #endif
2836 
2837 void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
2838   Value *Start = getStartValue()->getLiveInIRValue();
2839   PHINode *Phi = PHINode::Create(Start->getType(), 2, "index");
2840   Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2841 
2842   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2843   Phi->addIncoming(Start, VectorPH);
2844   Phi->setDebugLoc(getDebugLoc());
2845   State.set(this, Phi, /*IsScalar*/ true);
2846 }
2847 
2848 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2849 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2850                                    VPSlotTracker &SlotTracker) const {
2851   O << Indent << "EMIT ";
2852   printAsOperand(O, SlotTracker);
2853   O << " = CANONICAL-INDUCTION ";
2854   printOperands(O, SlotTracker);
2855 }
2856 #endif
2857 
2858 bool VPCanonicalIVPHIRecipe::isCanonical(
2859     InductionDescriptor::InductionKind Kind, VPValue *Start,
2860     VPValue *Step) const {
2861   // Must be an integer induction.
2862   if (Kind != InductionDescriptor::IK_IntInduction)
2863     return false;
2864   // Start must match the start value of this canonical induction.
2865   if (Start != getStartValue())
2866     return false;
2867 
2868   // If the step is defined by a recipe, it is not a ConstantInt.
2869   if (Step->getDefiningRecipe())
2870     return false;
2871 
2872   ConstantInt *StepC = dyn_cast<ConstantInt>(Step->getLiveInIRValue());
2873   return StepC && StepC->isOne();
2874 }
2875 
2876 bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
2877   return IsScalarAfterVectorization &&
2878          (!IsScalable || vputils::onlyFirstLaneUsed(this));
2879 }
2880 
2881 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
2882   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
2883          "Not a pointer induction according to InductionDescriptor!");
2884   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
2885          "Unexpected type.");
2886   assert(!onlyScalarsGenerated(State.VF.isScalable()) &&
2887          "Recipe should have been replaced");
2888 
2889   auto *IVR = getParent()->getPlan()->getCanonicalIV();
2890   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, /*IsScalar*/ true));
2891   unsigned CurrentPart = getUnrollPart(*this);
2892 
2893   // Build a pointer phi
2894   Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
2895   Type *ScStValueType = ScalarStartValue->getType();
2896 
2897   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2898   PHINode *NewPointerPhi = nullptr;
2899   if (CurrentPart == 0) {
2900     NewPointerPhi = PHINode::Create(ScStValueType, 2, "pointer.phi",
2901                                     CanonicalIV->getIterator());
2902     NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
2903   } else {
2904     // The recipe has been unrolled. In that case, fetch the single pointer phi
2905     // shared among all unrolled parts of the recipe.
2906     auto *GEP =
2907         cast<GetElementPtrInst>(State.get(getFirstUnrolledPartOperand()));
2908     NewPointerPhi = cast<PHINode>(GEP->getPointerOperand());
2909   }
2910 
2911   // The pointer induction is performed using a GEP.
2912   BasicBlock::iterator InductionLoc = State.Builder.GetInsertPoint();
2913   Value *ScalarStepValue = State.get(getOperand(1), VPLane(0));
2914   Type *PhiType = IndDesc.getStep()->getType();
2915   Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
2916   // Add induction update using an incorrect block temporarily. The phi node
2917   // will be fixed after VPlan execution. Note that at this point the latch
2918   // block cannot be used, as it does not exist yet.
2919   // TODO: Model increment value in VPlan, by turning the recipe into a
2920   // multi-def and a subclass of VPHeaderPHIRecipe.
2921   if (CurrentPart == 0) {
2922     // The recipe represents the first part of the pointer induction. Create the
2923     // GEP to increment the phi across all unrolled parts.
2924     unsigned UF = getParent()->getPlan()->getUF();
2925     Value *NumUnrolledElems =
2926         State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, UF));
2927 
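    // E.g. (sketch): with fixed VF = 4 and UF = 2, NumUnrolledElems folds to 8
    // and the increment below is roughly
    //   %ptr.ind = getelementptr i8, ptr %pointer.phi, <8 * step>.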
2928     Value *InductionGEP = GetElementPtrInst::Create(
2929         State.Builder.getInt8Ty(), NewPointerPhi,
2930         State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
2931         InductionLoc);
2932 
2933     NewPointerPhi->addIncoming(InductionGEP, VectorPH);
2934   }
2935 
2936   // Create actual address geps that use the pointer phi as base and a
2937   // vectorized version of the step value (<step*0, ..., step*N>) as offset.
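  // E.g. (sketch): with VF = 4 and CurrentPart = 1, StartOffset becomes
  // <4, 5, 6, 7>, and the GEP below advances the pointer phi by
  // <4, 5, 6, 7> * %step bytes (the GEP is i8-typed).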
2938   Type *VecPhiType = VectorType::get(PhiType, State.VF);
2939   Value *StartOffsetScalar = State.Builder.CreateMul(
2940       RuntimeVF, ConstantInt::get(PhiType, CurrentPart));
2941   Value *StartOffset =
2942       State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
2943   // Add a vector of consecutive numbers <0, 1, ..., VF-1> to the start offset.
2944   StartOffset = State.Builder.CreateAdd(
2945       StartOffset, State.Builder.CreateStepVector(VecPhiType));
2946 
2947   assert(ScalarStepValue == State.get(getOperand(1), VPLane(0)) &&
2948          "scalar step must be the same across all parts");
2949   Value *GEP = State.Builder.CreateGEP(
2950       State.Builder.getInt8Ty(), NewPointerPhi,
2951       State.Builder.CreateMul(StartOffset, State.Builder.CreateVectorSplat(
2952                                                State.VF, ScalarStepValue)),
2953       "vector.gep");
2954   State.set(this, GEP);
2955 }
2956 
2957 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2958 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2959                                           VPSlotTracker &SlotTracker) const {
2960   assert((getNumOperands() == 2 || getNumOperands() == 4) &&
2961          "unexpected number of operands");
2962   O << Indent << "EMIT ";
2963   printAsOperand(O, SlotTracker);
2964   O << " = WIDEN-POINTER-INDUCTION ";
2965   getStartValue()->printAsOperand(O, SlotTracker);
2966   O << ", " << *IndDesc.getStep();
2967   if (getNumOperands() == 4) {
2968     O << ", ";
2969     getOperand(2)->printAsOperand(O, SlotTracker);
2970     O << ", ";
2971     getOperand(3)->printAsOperand(O, SlotTracker);
2972   }
2973 }
2974 #endif
2975 
2976 void VPExpandSCEVRecipe::execute(VPTransformState &State) {
2977   assert(!State.Lane && "cannot be used in per-lane");
2978   const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2979   SCEVExpander Exp(SE, DL, "induction");
2980 
2981   Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
2982                                  &*State.Builder.GetInsertPoint());
2983   assert(!State.ExpandedSCEVs.contains(Expr) &&
2984          "Same SCEV expanded multiple times");
2985   State.ExpandedSCEVs[Expr] = Res;
2986   State.set(this, Res, VPLane(0));
2987 }
2988 
2989 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2990 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
2991                                VPSlotTracker &SlotTracker) const {
2992   O << Indent << "EMIT ";
2993   printAsOperand(O, SlotTracker);
2994   O << " = EXPAND SCEV " << *Expr;
2995 }
2996 #endif
2997 
2998 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
2999   Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
3000   Type *STy = CanonicalIV->getType();
3001   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
3002   ElementCount VF = State.VF;
3003   Value *VStart = VF.isScalar()
3004                       ? CanonicalIV
3005                       : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
3006   Value *VStep = createStepForVF(Builder, STy, VF, getUnrollPart(*this));
3007   if (VF.isVector()) {
3008     VStep = Builder.CreateVectorSplat(VF, VStep);
3009     VStep =
3010         Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
3011   }
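  // E.g. (illustrative): for VF = 4 and unroll part 0 the add below produces
  // <%iv, %iv, %iv, %iv> + <0, 1, 2, 3> = <%iv, %iv+1, %iv+2, %iv+3>.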
3012   Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
3013   State.set(this, CanonicalVectorIV);
3014 }
3015 
3016 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3017 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
3018                                      VPSlotTracker &SlotTracker) const {
3019   O << Indent << "EMIT ";
3020   printAsOperand(O, SlotTracker);
3021   O << " = WIDEN-CANONICAL-INDUCTION ";
3022   printOperands(O, SlotTracker);
3023 }
3024 #endif
3025 
3026 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
3027   auto &Builder = State.Builder;
3028   // Create a vector from the initial value.
3029   auto *VectorInit = getStartValue()->getLiveInIRValue();
3030 
3031   Type *VecTy = State.VF.isScalar()
3032                     ? VectorInit->getType()
3033                     : VectorType::get(VectorInit->getType(), State.VF);
3034 
3035   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3036   if (State.VF.isVector()) {
3037     auto *IdxTy = Builder.getInt32Ty();
3038     auto *One = ConstantInt::get(IdxTy, 1);
3039     IRBuilder<>::InsertPointGuard Guard(Builder);
3040     Builder.SetInsertPoint(VectorPH->getTerminator());
3041     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
3042     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3043     VectorInit = Builder.CreateInsertElement(
3044         PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
3045   }
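  // E.g. (sketch): for fixed VF = 4 the initial value ends up in the last
  // lane: %vector.recur.init = <poison, poison, poison, %init>.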
3046 
3047   // Create a phi node for the new recurrence.
3048   PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
3049   Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
3050   Phi->addIncoming(VectorInit, VectorPH);
3051   State.set(this, Phi);
3052 }
3053 
3054 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3055 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
3056                                             VPSlotTracker &SlotTracker) const {
3057   O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
3058   printAsOperand(O, SlotTracker);
3059   O << " = phi ";
3060   printOperands(O, SlotTracker);
3061 }
3062 #endif
3063 
3064 void VPReductionPHIRecipe::execute(VPTransformState &State) {
3065   auto &Builder = State.Builder;
3066 
3067   // Reductions do not have to start at zero. They can start with
3068   // any loop-invariant value.
3069   VPValue *StartVPV = getStartValue();
3070   Value *StartV = StartVPV->getLiveInIRValue();
3071 
3072   // In order to support recurrences we need to be able to vectorize Phi nodes.
3073   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3074   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
3075   // this value when we vectorize all of the instructions that use the PHI.
3076   bool ScalarPHI = State.VF.isScalar() || IsInLoop;
3077   Type *VecTy = ScalarPHI ? StartV->getType()
3078                           : VectorType::get(StartV->getType(), State.VF);
3079 
3080   BasicBlock *HeaderBB = State.CFG.PrevBB;
3081   assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
3082          "recipe must be in the vector loop header");
3083   auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
3084   Phi->insertBefore(HeaderBB->getFirstInsertionPt());
3085   State.set(this, Phi, IsInLoop);
3086 
3087   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3088 
3089   Value *Iden = nullptr;
3090   RecurKind RK = RdxDesc.getRecurrenceKind();
3091   unsigned CurrentPart = getUnrollPart(*this);
3092 
3093   if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
3094       RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
3095     // MinMax and AnyOf reductions have the start value as their identity.
3096     if (ScalarPHI) {
3097       Iden = StartV;
3098     } else {
3099       IRBuilderBase::InsertPointGuard IPBuilder(Builder);
3100       Builder.SetInsertPoint(VectorPH->getTerminator());
3101       StartV = Iden = State.get(StartVPV);
3102     }
3103   } else {
3104     Iden = llvm::getRecurrenceIdentity(RK, VecTy->getScalarType(),
3105                                        RdxDesc.getFastMathFlags());
3106 
3107     if (!ScalarPHI) {
3108       if (CurrentPart == 0) {
3109         // Create start and identity vector values for the reduction in the
3110         // preheader.
3111         // TODO: Introduce recipes in VPlan preheader to create initial values.
3112         Iden = Builder.CreateVectorSplat(State.VF, Iden);
3113         IRBuilderBase::InsertPointGuard IPBuilder(Builder);
3114         Builder.SetInsertPoint(VectorPH->getTerminator());
3115         Constant *Zero = Builder.getInt32(0);
3116         StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
3117       } else {
3118         Iden = Builder.CreateVectorSplat(State.VF, Iden);
3119       }
3120     }
3121   }
3122 
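  // E.g. (illustrative): for an integer add reduction with start value %s and
  // VF = 4, part 0 of the phi is seeded with <%s, 0, 0, 0> and the remaining
  // parts with the identity splat <0, 0, 0, 0>.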
3123   Phi = cast<PHINode>(State.get(this, IsInLoop));
3124   Value *StartVal = (CurrentPart == 0) ? StartV : Iden;
3125   Phi->addIncoming(StartVal, VectorPH);
3126 }
3127 
3128 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3129 void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3130                                  VPSlotTracker &SlotTracker) const {
3131   O << Indent << "WIDEN-REDUCTION-PHI ";
3132 
3133   printAsOperand(O, SlotTracker);
3134   O << " = phi ";
3135   printOperands(O, SlotTracker);
3136 }
3137 #endif
3138 
3139 void VPWidenPHIRecipe::execute(VPTransformState &State) {
3140   assert(EnableVPlanNativePath &&
3141          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
3142 
3143   Value *Op0 = State.get(getOperand(0));
3144   Type *VecTy = Op0->getType();
3145   Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, "vec.phi");
3146   State.set(this, VecPhi);
3147 }
3148 
3149 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3150 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3151                              VPSlotTracker &SlotTracker) const {
3152   O << Indent << "WIDEN-PHI ";
3153 
3154   auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
3155   // Unless all incoming values are modeled in VPlan, print the original PHI
3156   // directly.
3157   // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming
3158   // values as VPValues.
3159   if (getNumOperands() != OriginalPhi->getNumOperands()) {
3160     O << VPlanIngredient(OriginalPhi);
3161     return;
3162   }
3163 
3164   printAsOperand(O, SlotTracker);
3165   O << " = phi ";
3166   printOperands(O, SlotTracker);
3167 }
3168 #endif
3169 
3170 // TODO: It would be good to use the existing VPWidenPHIRecipe instead and
3171 // remove VPActiveLaneMaskPHIRecipe.
3172 void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
3173   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3174   Value *StartMask = State.get(getOperand(0));
3175   PHINode *Phi =
3176       State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
3177   Phi->addIncoming(StartMask, VectorPH);
3178   Phi->setDebugLoc(getDebugLoc());
3179   State.set(this, Phi);
3180 }
3181 
3182 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3183 void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3184                                       VPSlotTracker &SlotTracker) const {
3185   O << Indent << "ACTIVE-LANE-MASK-PHI ";
3186 
3187   printAsOperand(O, SlotTracker);
3188   O << " = phi ";
3189   printOperands(O, SlotTracker);
3190 }
3191 #endif
3192 
3193 void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) {
3194   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3195   Value *Start = State.get(getOperand(0), VPLane(0));
3196   PHINode *Phi = State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv");
3197   Phi->addIncoming(Start, VectorPH);
3198   Phi->setDebugLoc(getDebugLoc());
3199   State.set(this, Phi, /*IsScalar=*/true);
3200 }
3201 
3202 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3203 void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3204                                   VPSlotTracker &SlotTracker) const {
3205   O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
3206 
3207   printAsOperand(O, SlotTracker);
3208   O << " = phi ";
3209   printOperands(O, SlotTracker);
3210 }
3211 #endif
3212