xref: /llvm-project/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp (revision 3ec6f805c5184b3933cba94e241b44da362437d7)
1 //===- VPlanRecipes.cpp - Implementations for VPlan recipes ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file contains implementations for different VPlan recipes.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "VPlan.h"
15 #include "VPlanAnalysis.h"
16 #include "VPlanUtils.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/Analysis/IVDescriptors.h"
21 #include "llvm/IR/BasicBlock.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/IR/Value.h"
28 #include "llvm/IR/VectorBuilder.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/raw_ostream.h"
33 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
34 #include "llvm/Transforms/Utils/LoopUtils.h"
35 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 using VectorParts = SmallVector<Value *, 2>;
41 
42 namespace llvm {
43 extern cl::opt<bool> EnableVPlanNativePath;
44 }
45 extern cl::opt<unsigned> ForceTargetInstructionCost;
46 
47 #define LV_NAME "loop-vectorize"
48 #define DEBUG_TYPE LV_NAME
49 
50 bool VPRecipeBase::mayWriteToMemory() const {
51   switch (getVPDefID()) {
52   case VPInstructionSC:
53     if (Instruction::isBinaryOp(cast<VPInstruction>(this)->getOpcode()))
54       return false;
55     switch (cast<VPInstruction>(this)->getOpcode()) {
56     case Instruction::Or:
57     case Instruction::ICmp:
58     case Instruction::Select:
59     case VPInstruction::Not:
60     case VPInstruction::CalculateTripCountMinusVF:
61     case VPInstruction::CanonicalIVIncrementForPart:
62     case VPInstruction::ExtractFromEnd:
63     case VPInstruction::FirstOrderRecurrenceSplice:
64     case VPInstruction::LogicalAnd:
65     case VPInstruction::PtrAdd:
66       return false;
67     default:
68       return true;
69     }
70   case VPInterleaveSC:
71     return cast<VPInterleaveRecipe>(this)->getNumStoreOperands() > 0;
72   case VPWidenStoreEVLSC:
73   case VPWidenStoreSC:
74     return true;
75   case VPReplicateSC:
76     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
77         ->mayWriteToMemory();
78   case VPWidenCallSC:
79     return !cast<VPWidenCallRecipe>(this)
80                 ->getCalledScalarFunction()
81                 ->onlyReadsMemory();
82   case VPBranchOnMaskSC:
83   case VPScalarIVStepsSC:
84   case VPPredInstPHISC:
85     return false;
86   case VPBlendSC:
87   case VPReductionEVLSC:
88   case VPReductionSC:
89   case VPWidenCanonicalIVSC:
90   case VPWidenCastSC:
91   case VPWidenGEPSC:
92   case VPWidenIntOrFpInductionSC:
93   case VPWidenLoadEVLSC:
94   case VPWidenLoadSC:
95   case VPWidenPHISC:
96   case VPWidenSC:
97   case VPWidenEVLSC:
98   case VPWidenSelectSC: {
99     const Instruction *I =
100         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
101     (void)I;
102     assert((!I || !I->mayWriteToMemory()) &&
103            "underlying instruction may write to memory");
104     return false;
105   }
106   default:
107     return true;
108   }
109 }
110 
111 bool VPRecipeBase::mayReadFromMemory() const {
112   switch (getVPDefID()) {
113   case VPWidenLoadEVLSC:
114   case VPWidenLoadSC:
115     return true;
116   case VPReplicateSC:
117     return cast<Instruction>(getVPSingleValue()->getUnderlyingValue())
118         ->mayReadFromMemory();
119   case VPWidenCallSC:
120     return !cast<VPWidenCallRecipe>(this)
121                 ->getCalledScalarFunction()
122                 ->onlyWritesMemory();
123   case VPBranchOnMaskSC:
124   case VPPredInstPHISC:
125   case VPScalarIVStepsSC:
126   case VPWidenStoreEVLSC:
127   case VPWidenStoreSC:
128     return false;
129   case VPBlendSC:
130   case VPReductionEVLSC:
131   case VPReductionSC:
132   case VPWidenCanonicalIVSC:
133   case VPWidenCastSC:
134   case VPWidenGEPSC:
135   case VPWidenIntOrFpInductionSC:
136   case VPWidenPHISC:
137   case VPWidenSC:
138   case VPWidenEVLSC:
139   case VPWidenSelectSC: {
140     const Instruction *I =
141         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
142     (void)I;
143     assert((!I || !I->mayReadFromMemory()) &&
144            "underlying instruction may read from memory");
145     return false;
146   }
147   default:
148     return true;
149   }
150 }
151 
152 bool VPRecipeBase::mayHaveSideEffects() const {
153   switch (getVPDefID()) {
154   case VPDerivedIVSC:
155   case VPPredInstPHISC:
156   case VPScalarCastSC:
157     return false;
158   case VPInstructionSC:
159     return mayWriteToMemory();
160   case VPWidenCallSC: {
161     Function *Fn = cast<VPWidenCallRecipe>(this)->getCalledScalarFunction();
162     return mayWriteToMemory() || !Fn->doesNotThrow() || !Fn->willReturn();
163   }
164   case VPBlendSC:
165   case VPReductionEVLSC:
166   case VPReductionSC:
167   case VPScalarIVStepsSC:
168   case VPWidenCanonicalIVSC:
169   case VPWidenCastSC:
170   case VPWidenGEPSC:
171   case VPWidenIntOrFpInductionSC:
172   case VPWidenPHISC:
173   case VPWidenPointerInductionSC:
174   case VPWidenSC:
175   case VPWidenEVLSC:
176   case VPWidenSelectSC: {
177     const Instruction *I =
178         dyn_cast_or_null<Instruction>(getVPSingleValue()->getUnderlyingValue());
179     (void)I;
180     assert((!I || !I->mayHaveSideEffects()) &&
181            "underlying instruction has side-effects");
182     return false;
183   }
184   case VPInterleaveSC:
185     return mayWriteToMemory();
186   case VPWidenLoadEVLSC:
187   case VPWidenLoadSC:
188   case VPWidenStoreEVLSC:
189   case VPWidenStoreSC:
190     assert(
191         cast<VPWidenMemoryRecipe>(this)->getIngredient().mayHaveSideEffects() ==
192             mayWriteToMemory() &&
193         "mayHaveSideffects result for ingredient differs from this "
194         "implementation");
195     return mayWriteToMemory();
196   case VPReplicateSC: {
197     auto *R = cast<VPReplicateRecipe>(this);
198     return R->getUnderlyingInstr()->mayHaveSideEffects();
199   }
200   default:
201     return true;
202   }
203 }
204 
205 void VPLiveOut::fixPhi(VPlan &Plan, VPTransformState &State) {
206   VPValue *ExitValue = getOperand(0);
207   VPBasicBlock *MiddleVPBB =
208       cast<VPBasicBlock>(Plan.getVectorLoopRegion()->getSingleSuccessor());
209   VPRecipeBase *ExitingRecipe = ExitValue->getDefiningRecipe();
210   auto *ExitingVPBB = ExitingRecipe ? ExitingRecipe->getParent() : nullptr;
211   // Values leaving the vector loop reach live out phi's in the exiting block
212   // via middle block.
213   auto *PredVPBB = !ExitingVPBB || ExitingVPBB->getEnclosingLoopRegion()
214                        ? MiddleVPBB
215                        : ExitingVPBB;
216   BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
217   Value *V = State.get(ExitValue, VPLane(0));
218   if (Phi->getBasicBlockIndex(PredBB) != -1)
219     Phi->setIncomingValueForBlock(PredBB, V);
220   else
221     Phi->addIncoming(V, PredBB);
222 }
223 
224 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
225 void VPLiveOut::print(raw_ostream &O, VPSlotTracker &SlotTracker) const {
226   O << "Live-out ";
227   getPhi()->printAsOperand(O);
228   O << " = ";
229   getOperand(0)->printAsOperand(O, SlotTracker);
230   O << "\n";
231 }
232 #endif
233 
234 void VPRecipeBase::insertBefore(VPRecipeBase *InsertPos) {
235   assert(!Parent && "Recipe already in some VPBasicBlock");
236   assert(InsertPos->getParent() &&
237          "Insertion position not in any VPBasicBlock");
238   InsertPos->getParent()->insert(this, InsertPos->getIterator());
239 }
240 
241 void VPRecipeBase::insertBefore(VPBasicBlock &BB,
242                                 iplist<VPRecipeBase>::iterator I) {
243   assert(!Parent && "Recipe already in some VPBasicBlock");
244   assert(I == BB.end() || I->getParent() == &BB);
245   BB.insert(this, I);
246 }
247 
248 void VPRecipeBase::insertAfter(VPRecipeBase *InsertPos) {
249   assert(!Parent && "Recipe already in some VPBasicBlock");
250   assert(InsertPos->getParent() &&
251          "Insertion position not in any VPBasicBlock");
252   InsertPos->getParent()->insert(this, std::next(InsertPos->getIterator()));
253 }
254 
255 void VPRecipeBase::removeFromParent() {
256   assert(getParent() && "Recipe not in any VPBasicBlock");
257   getParent()->getRecipeList().remove(getIterator());
258   Parent = nullptr;
259 }
260 
261 iplist<VPRecipeBase>::iterator VPRecipeBase::eraseFromParent() {
262   assert(getParent() && "Recipe not in any VPBasicBlock");
263   return getParent()->getRecipeList().erase(getIterator());
264 }
265 
266 void VPRecipeBase::moveAfter(VPRecipeBase *InsertPos) {
267   removeFromParent();
268   insertAfter(InsertPos);
269 }
270 
271 void VPRecipeBase::moveBefore(VPBasicBlock &BB,
272                               iplist<VPRecipeBase>::iterator I) {
273   removeFromParent();
274   insertBefore(BB, I);
275 }
276 
277 /// Return the underlying instruction to be used for computing \p R's cost via
278 /// the legacy cost model. Return nullptr if there's no suitable instruction.
279 static Instruction *getInstructionForCost(const VPRecipeBase *R) {
280   if (auto *S = dyn_cast<VPSingleDefRecipe>(R))
281     return dyn_cast_or_null<Instruction>(S->getUnderlyingValue());
282   if (auto *IG = dyn_cast<VPInterleaveRecipe>(R))
283     return IG->getInsertPos();
284   // Currently the legacy cost model only calculates the instruction cost with
285   // underlying instruction. Removing the WidenMem here will prevent
286   // force-target-instruction-cost overwriting the cost of recipe with
287   // underlying instruction which is inconsistent with the legacy model.
288   // TODO: Remove WidenMem from this function when we don't need to compare to
289   // the legacy model.
290   if (auto *WidenMem = dyn_cast<VPWidenMemoryRecipe>(R))
291     return &WidenMem->getIngredient();
292   return nullptr;
293 }
294 
295 InstructionCost VPRecipeBase::cost(ElementCount VF, VPCostContext &Ctx) {
296   auto *UI = getInstructionForCost(this);
297   if (UI && Ctx.skipCostComputation(UI, VF.isVector()))
298     return 0;
299 
300   InstructionCost RecipeCost = computeCost(VF, Ctx);
301   if (UI && ForceTargetInstructionCost.getNumOccurrences() > 0 &&
302       RecipeCost.isValid())
303     RecipeCost = InstructionCost(ForceTargetInstructionCost);
304 
305   LLVM_DEBUG({
306     dbgs() << "Cost of " << RecipeCost << " for VF " << VF << ": ";
307     dump();
308   });
309   return RecipeCost;
310 }
311 
312 InstructionCost VPRecipeBase::computeCost(ElementCount VF,
313                                           VPCostContext &Ctx) const {
314   // Compute the cost for the recipe falling back to the legacy cost model using
315   // the underlying instruction. If there is no underlying instruction, returns
316   // 0.
317   Instruction *UI = getInstructionForCost(this);
318   if (UI && isa<VPReplicateRecipe>(this)) {
319     // VPReplicateRecipe may be cloned as part of an existing VPlan-to-VPlan
320     // transform, avoid computing their cost multiple times for now.
321     Ctx.SkipCostComputation.insert(UI);
322   }
323   return UI ? Ctx.getLegacyCost(UI, VF) : 0;
324 }
325 
326 FastMathFlags VPRecipeWithIRFlags::getFastMathFlags() const {
327   assert(OpType == OperationType::FPMathOp &&
328          "recipe doesn't have fast math flags");
329   FastMathFlags Res;
330   Res.setAllowReassoc(FMFs.AllowReassoc);
331   Res.setNoNaNs(FMFs.NoNaNs);
332   Res.setNoInfs(FMFs.NoInfs);
333   Res.setNoSignedZeros(FMFs.NoSignedZeros);
334   Res.setAllowReciprocal(FMFs.AllowReciprocal);
335   Res.setAllowContract(FMFs.AllowContract);
336   Res.setApproxFunc(FMFs.ApproxFunc);
337   return Res;
338 }
339 
340 template <unsigned PartOpIdx>
341 VPValue *
342 VPUnrollPartAccessor<PartOpIdx>::getUnrollPartOperand(VPUser &U) const {
343   if (U.getNumOperands() == PartOpIdx + 1)
344     return U.getOperand(PartOpIdx);
345   return nullptr;
346 }
347 
348 template <unsigned PartOpIdx>
349 unsigned VPUnrollPartAccessor<PartOpIdx>::getUnrollPart(VPUser &U) const {
350   if (auto *UnrollPartOp = getUnrollPartOperand(U))
351     return cast<ConstantInt>(UnrollPartOp->getLiveInIRValue())->getZExtValue();
352   return 0;
353 }
354 
355 VPInstruction::VPInstruction(unsigned Opcode, CmpInst::Predicate Pred,
356                              VPValue *A, VPValue *B, DebugLoc DL,
357                              const Twine &Name)
358     : VPRecipeWithIRFlags(VPDef::VPInstructionSC, ArrayRef<VPValue *>({A, B}),
359                           Pred, DL),
360       Opcode(Opcode), Name(Name.str()) {
361   assert(Opcode == Instruction::ICmp &&
362          "only ICmp predicates supported at the moment");
363 }
364 
365 VPInstruction::VPInstruction(unsigned Opcode,
366                              std::initializer_list<VPValue *> Operands,
367                              FastMathFlags FMFs, DebugLoc DL, const Twine &Name)
368     : VPRecipeWithIRFlags(VPDef::VPInstructionSC, Operands, FMFs, DL),
369       Opcode(Opcode), Name(Name.str()) {
370   // Make sure the VPInstruction is a floating-point operation.
371   assert(isFPMathOp() && "this op can't take fast-math flags");
372 }
373 
374 bool VPInstruction::doesGeneratePerAllLanes() const {
375   return Opcode == VPInstruction::PtrAdd && !vputils::onlyFirstLaneUsed(this);
376 }
377 
378 bool VPInstruction::canGenerateScalarForFirstLane() const {
379   if (Instruction::isBinaryOp(getOpcode()))
380     return true;
381   if (isSingleScalar() || isVectorToScalar())
382     return true;
383   switch (Opcode) {
384   case Instruction::ICmp:
385   case VPInstruction::BranchOnCond:
386   case VPInstruction::BranchOnCount:
387   case VPInstruction::CalculateTripCountMinusVF:
388   case VPInstruction::CanonicalIVIncrementForPart:
389   case VPInstruction::PtrAdd:
390   case VPInstruction::ExplicitVectorLength:
391     return true;
392   default:
393     return false;
394   }
395 }
396 
397 Value *VPInstruction::generatePerLane(VPTransformState &State,
398                                       const VPLane &Lane) {
399   IRBuilderBase &Builder = State.Builder;
400 
401   assert(getOpcode() == VPInstruction::PtrAdd &&
402          "only PtrAdd opcodes are supported for now");
403   return Builder.CreatePtrAdd(State.get(getOperand(0), Lane),
404                               State.get(getOperand(1), Lane), Name);
405 }
406 
407 Value *VPInstruction::generate(VPTransformState &State) {
408   IRBuilderBase &Builder = State.Builder;
409 
410   if (Instruction::isBinaryOp(getOpcode())) {
411     bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
412     Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
413     Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
414     auto *Res =
415         Builder.CreateBinOp((Instruction::BinaryOps)getOpcode(), A, B, Name);
416     if (auto *I = dyn_cast<Instruction>(Res))
417       setFlags(I);
418     return Res;
419   }
420 
421   switch (getOpcode()) {
422   case VPInstruction::Not: {
423     Value *A = State.get(getOperand(0));
424     return Builder.CreateNot(A, Name);
425   }
426   case Instruction::ICmp: {
427     bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
428     Value *A = State.get(getOperand(0), OnlyFirstLaneUsed);
429     Value *B = State.get(getOperand(1), OnlyFirstLaneUsed);
430     return Builder.CreateCmp(getPredicate(), A, B, Name);
431   }
432   case Instruction::Select: {
433     Value *Cond = State.get(getOperand(0));
434     Value *Op1 = State.get(getOperand(1));
435     Value *Op2 = State.get(getOperand(2));
436     return Builder.CreateSelect(Cond, Op1, Op2, Name);
437   }
438   case VPInstruction::ActiveLaneMask: {
439     // Get first lane of vector induction variable.
440     Value *VIVElem0 = State.get(getOperand(0), VPLane(0));
441     // Get the original loop tripcount.
442     Value *ScalarTC = State.get(getOperand(1), VPLane(0));
443 
444     // If this part of the active lane mask is scalar, generate the CMP directly
445     // to avoid unnecessary extracts.
446     if (State.VF.isScalar())
447       return Builder.CreateCmp(CmpInst::Predicate::ICMP_ULT, VIVElem0, ScalarTC,
448                                Name);
449 
450     auto *Int1Ty = Type::getInt1Ty(Builder.getContext());
451     auto *PredTy = VectorType::get(Int1Ty, State.VF);
452     return Builder.CreateIntrinsic(Intrinsic::get_active_lane_mask,
453                                    {PredTy, ScalarTC->getType()},
454                                    {VIVElem0, ScalarTC}, nullptr, Name);
455   }
456   case VPInstruction::FirstOrderRecurrenceSplice: {
457     // Generate code to combine the previous and current values in vector v3.
458     //
459     //   vector.ph:
460     //     v_init = vector(..., ..., ..., a[-1])
461     //     br vector.body
462     //
463     //   vector.body
464     //     i = phi [0, vector.ph], [i+4, vector.body]
465     //     v1 = phi [v_init, vector.ph], [v2, vector.body]
466     //     v2 = a[i, i+1, i+2, i+3];
467     //     v3 = vector(v1(3), v2(0, 1, 2))
468 
469     auto *V1 = State.get(getOperand(0));
470     if (!V1->getType()->isVectorTy())
471       return V1;
472     Value *V2 = State.get(getOperand(1));
473     return Builder.CreateVectorSplice(V1, V2, -1, Name);
474   }
475   case VPInstruction::CalculateTripCountMinusVF: {
476     unsigned UF = getParent()->getPlan()->getUF();
477     Value *ScalarTC = State.get(getOperand(0), VPLane(0));
478     Value *Step = createStepForVF(Builder, ScalarTC->getType(), State.VF, UF);
479     Value *Sub = Builder.CreateSub(ScalarTC, Step);
480     Value *Cmp = Builder.CreateICmp(CmpInst::Predicate::ICMP_UGT, ScalarTC, Step);
481     Value *Zero = ConstantInt::get(ScalarTC->getType(), 0);
482     return Builder.CreateSelect(Cmp, Sub, Zero);
483   }
484   case VPInstruction::ExplicitVectorLength: {
485     // TODO: Restructure this code with an explicit remainder loop, vsetvli can
486     // be outside of the main loop.
487     Value *AVL = State.get(getOperand(0), /*IsScalar*/ true);
488     // Compute EVL
489     assert(AVL->getType()->isIntegerTy() &&
490            "Requested vector length should be an integer.");
491 
492     assert(State.VF.isScalable() && "Expected scalable vector factor.");
493     Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue());
494 
495     Value *EVL = State.Builder.CreateIntrinsic(
496         State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length,
497         {AVL, VFArg, State.Builder.getTrue()});
498     return EVL;
499   }
500   case VPInstruction::CanonicalIVIncrementForPart: {
501     unsigned Part = getUnrollPart(*this);
502     auto *IV = State.get(getOperand(0), VPLane(0));
503     assert(Part != 0 && "Must have a positive part");
504     // The canonical IV is incremented by the vectorization factor (num of
505     // SIMD elements) times the unroll part.
506     Value *Step = createStepForVF(Builder, IV->getType(), State.VF, Part);
507     return Builder.CreateAdd(IV, Step, Name, hasNoUnsignedWrap(),
508                              hasNoSignedWrap());
509   }
510   case VPInstruction::BranchOnCond: {
511     Value *Cond = State.get(getOperand(0), VPLane(0));
512     // Replace the temporary unreachable terminator with a new conditional
513     // branch, hooking it up to backward destination for exiting blocks now and
514     // to forward destination(s) later when they are created.
515     BranchInst *CondBr =
516         Builder.CreateCondBr(Cond, Builder.GetInsertBlock(), nullptr);
517     CondBr->setSuccessor(0, nullptr);
518     Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
519 
520     if (!getParent()->isExiting())
521       return CondBr;
522 
523     VPRegionBlock *ParentRegion = getParent()->getParent();
524     VPBasicBlock *Header = ParentRegion->getEntryBasicBlock();
525     CondBr->setSuccessor(1, State.CFG.VPBB2IRBB[Header]);
526     return CondBr;
527   }
528   case VPInstruction::BranchOnCount: {
529     // First create the compare.
530     Value *IV = State.get(getOperand(0), /*IsScalar*/ true);
531     Value *TC = State.get(getOperand(1), /*IsScalar*/ true);
532     Value *Cond = Builder.CreateICmpEQ(IV, TC);
533 
534     // Now create the branch.
535     auto *Plan = getParent()->getPlan();
536     VPRegionBlock *TopRegion = Plan->getVectorLoopRegion();
537     VPBasicBlock *Header = TopRegion->getEntry()->getEntryBasicBlock();
538 
539     // Replace the temporary unreachable terminator with a new conditional
540     // branch, hooking it up to backward destination (the header) now and to the
541     // forward destination (the exit/middle block) later when it is created.
542     // Note that CreateCondBr expects a valid BB as first argument, so we need
543     // to set it to nullptr later.
544     BranchInst *CondBr = Builder.CreateCondBr(Cond, Builder.GetInsertBlock(),
545                                               State.CFG.VPBB2IRBB[Header]);
546     CondBr->setSuccessor(0, nullptr);
547     Builder.GetInsertBlock()->getTerminator()->eraseFromParent();
548     return CondBr;
549   }
550   case VPInstruction::ComputeReductionResult: {
551     // FIXME: The cross-recipe dependency on VPReductionPHIRecipe is temporary
552     // and will be removed by breaking up the recipe further.
553     auto *PhiR = cast<VPReductionPHIRecipe>(getOperand(0));
554     auto *OrigPhi = cast<PHINode>(PhiR->getUnderlyingValue());
555     // Get its reduction variable descriptor.
556     const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
557 
558     RecurKind RK = RdxDesc.getRecurrenceKind();
559 
560     Type *PhiTy = OrigPhi->getType();
561     // The recipe's operands are the reduction phi, followed by one operand for
562     // each part of the reduction.
563     unsigned UF = getNumOperands() - 1;
564     VectorParts RdxParts(UF);
565     for (unsigned Part = 0; Part < UF; ++Part)
566       RdxParts[Part] = State.get(getOperand(1 + Part), PhiR->isInLoop());
567 
568     // If the vector reduction can be performed in a smaller type, we truncate
569     // then extend the loop exit value to enable InstCombine to evaluate the
570     // entire expression in the smaller type.
571     // TODO: Handle this in truncateToMinBW.
572     if (State.VF.isVector() && PhiTy != RdxDesc.getRecurrenceType()) {
573       Type *RdxVecTy = VectorType::get(RdxDesc.getRecurrenceType(), State.VF);
574       for (unsigned Part = 0; Part < UF; ++Part)
575         RdxParts[Part] = Builder.CreateTrunc(RdxParts[Part], RdxVecTy);
576     }
577     // Reduce all of the unrolled parts into a single vector.
578     Value *ReducedPartRdx = RdxParts[0];
579     unsigned Op = RecurrenceDescriptor::getOpcode(RK);
580     if (RecurrenceDescriptor::isAnyOfRecurrenceKind(RK))
581       Op = Instruction::Or;
582 
583     if (PhiR->isOrdered()) {
584       ReducedPartRdx = RdxParts[UF - 1];
585     } else {
586       // Floating-point operations should have some FMF to enable the reduction.
587       IRBuilderBase::FastMathFlagGuard FMFG(Builder);
588       Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
589       for (unsigned Part = 1; Part < UF; ++Part) {
590         Value *RdxPart = RdxParts[Part];
591         if (Op != Instruction::ICmp && Op != Instruction::FCmp)
592           ReducedPartRdx = Builder.CreateBinOp(
593               (Instruction::BinaryOps)Op, RdxPart, ReducedPartRdx, "bin.rdx");
594         else
595           ReducedPartRdx = createMinMaxOp(Builder, RK, ReducedPartRdx, RdxPart);
596       }
597     }
598 
599     // Create the reduction after the loop. Note that inloop reductions create
600     // the target reduction in the loop using a Reduction recipe.
601     if ((State.VF.isVector() ||
602          RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) &&
603         !PhiR->isInLoop()) {
604       ReducedPartRdx =
605           createReduction(Builder, RdxDesc, ReducedPartRdx, OrigPhi);
606       // If the reduction can be performed in a smaller type, we need to extend
607       // the reduction to the wider type before we branch to the original loop.
608       if (PhiTy != RdxDesc.getRecurrenceType())
609         ReducedPartRdx = RdxDesc.isSigned()
610                              ? Builder.CreateSExt(ReducedPartRdx, PhiTy)
611                              : Builder.CreateZExt(ReducedPartRdx, PhiTy);
612     }
613 
614     return ReducedPartRdx;
615   }
616   case VPInstruction::ExtractFromEnd: {
617     auto *CI = cast<ConstantInt>(getOperand(1)->getLiveInIRValue());
618     unsigned Offset = CI->getZExtValue();
619     assert(Offset > 0 && "Offset from end must be positive");
620     Value *Res;
621     if (State.VF.isVector()) {
622       assert(Offset <= State.VF.getKnownMinValue() &&
623              "invalid offset to extract from");
624       // Extract lane VF - Offset from the operand.
625       Res = State.get(getOperand(0), VPLane::getLaneFromEnd(State.VF, Offset));
626     } else {
627       assert(Offset <= 1 && "invalid offset to extract from");
628       Res = State.get(getOperand(0));
629     }
630     if (isa<ExtractElementInst>(Res))
631       Res->setName(Name);
632     return Res;
633   }
634   case VPInstruction::LogicalAnd: {
635     Value *A = State.get(getOperand(0));
636     Value *B = State.get(getOperand(1));
637     return Builder.CreateLogicalAnd(A, B, Name);
638   }
639   case VPInstruction::PtrAdd: {
640     assert(vputils::onlyFirstLaneUsed(this) &&
641            "can only generate first lane for PtrAdd");
642     Value *Ptr = State.get(getOperand(0), /* IsScalar */ true);
643     Value *Addend = State.get(getOperand(1), /* IsScalar */ true);
644     return isInBounds() ? Builder.CreateInBoundsPtrAdd(Ptr, Addend, Name)
645                         : Builder.CreatePtrAdd(Ptr, Addend, Name);
646   }
647   case VPInstruction::ResumePhi: {
648     Value *IncomingFromVPlanPred =
649         State.get(getOperand(0), /* IsScalar */ true);
650     Value *IncomingFromOtherPreds =
651         State.get(getOperand(1), /* IsScalar */ true);
652     auto *NewPhi =
653         Builder.CreatePHI(IncomingFromOtherPreds->getType(), 2, Name);
654     BasicBlock *VPlanPred =
655         State.CFG
656             .VPBB2IRBB[cast<VPBasicBlock>(getParent()->getSinglePredecessor())];
657     NewPhi->addIncoming(IncomingFromVPlanPred, VPlanPred);
658     for (auto *OtherPred : predecessors(Builder.GetInsertBlock())) {
659       assert(OtherPred != VPlanPred &&
660              "VPlan predecessors should not be connected yet");
661       NewPhi->addIncoming(IncomingFromOtherPreds, OtherPred);
662     }
663     return NewPhi;
664   }
665 
666   default:
667     llvm_unreachable("Unsupported opcode for instruction");
668   }
669 }
670 
671 bool VPInstruction::isVectorToScalar() const {
672   return getOpcode() == VPInstruction::ExtractFromEnd ||
673          getOpcode() == VPInstruction::ComputeReductionResult;
674 }
675 
676 bool VPInstruction::isSingleScalar() const {
677   return getOpcode() == VPInstruction::ResumePhi;
678 }
679 
680 #if !defined(NDEBUG)
681 bool VPInstruction::isFPMathOp() const {
682   // Inspired by FPMathOperator::classof. Notable differences are that we don't
683   // support Call, PHI and Select opcodes here yet.
684   return Opcode == Instruction::FAdd || Opcode == Instruction::FMul ||
685          Opcode == Instruction::FNeg || Opcode == Instruction::FSub ||
686          Opcode == Instruction::FDiv || Opcode == Instruction::FRem ||
687          Opcode == Instruction::FCmp || Opcode == Instruction::Select;
688 }
689 #endif
690 
691 void VPInstruction::execute(VPTransformState &State) {
692   assert(!State.Lane && "VPInstruction executing an Lane");
693   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
694   assert((hasFastMathFlags() == isFPMathOp() ||
695           getOpcode() == Instruction::Select) &&
696          "Recipe not a FPMathOp but has fast-math flags?");
697   if (hasFastMathFlags())
698     State.Builder.setFastMathFlags(getFastMathFlags());
699   State.setDebugLocFrom(getDebugLoc());
700   bool GeneratesPerFirstLaneOnly = canGenerateScalarForFirstLane() &&
701                                    (vputils::onlyFirstLaneUsed(this) ||
702                                     isVectorToScalar() || isSingleScalar());
703   bool GeneratesPerAllLanes = doesGeneratePerAllLanes();
704   if (GeneratesPerAllLanes) {
705     for (unsigned Lane = 0, NumLanes = State.VF.getKnownMinValue();
706          Lane != NumLanes; ++Lane) {
707       Value *GeneratedValue = generatePerLane(State, VPLane(Lane));
708       assert(GeneratedValue && "generatePerLane must produce a value");
709       State.set(this, GeneratedValue, VPLane(Lane));
710     }
711     return;
712   }
713 
714   Value *GeneratedValue = generate(State);
715   if (!hasResult())
716     return;
717   assert(GeneratedValue && "generate must produce a value");
718   assert(
719       (GeneratedValue->getType()->isVectorTy() == !GeneratesPerFirstLaneOnly ||
720        State.VF.isScalar()) &&
721       "scalar value but not only first lane defined");
722   State.set(this, GeneratedValue,
723             /*IsScalar*/ GeneratesPerFirstLaneOnly);
724 }
725 
726 bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
727   assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
728   if (Instruction::isBinaryOp(getOpcode()))
729     return vputils::onlyFirstLaneUsed(this);
730 
731   switch (getOpcode()) {
732   default:
733     return false;
734   case Instruction::ICmp:
735   case VPInstruction::PtrAdd:
736     // TODO: Cover additional opcodes.
737     return vputils::onlyFirstLaneUsed(this);
738   case VPInstruction::ActiveLaneMask:
739   case VPInstruction::ExplicitVectorLength:
740   case VPInstruction::CalculateTripCountMinusVF:
741   case VPInstruction::CanonicalIVIncrementForPart:
742   case VPInstruction::BranchOnCount:
743   case VPInstruction::BranchOnCond:
744   case VPInstruction::ResumePhi:
745     return true;
746   };
747   llvm_unreachable("switch should return");
748 }
749 
750 bool VPInstruction::onlyFirstPartUsed(const VPValue *Op) const {
751   assert(is_contained(operands(), Op) && "Op must be an operand of the recipe");
752   if (Instruction::isBinaryOp(getOpcode()))
753     return vputils::onlyFirstPartUsed(this);
754 
755   switch (getOpcode()) {
756   default:
757     return false;
758   case Instruction::ICmp:
759   case Instruction::Select:
760     return vputils::onlyFirstPartUsed(this);
761   case VPInstruction::BranchOnCount:
762   case VPInstruction::BranchOnCond:
763   case VPInstruction::CanonicalIVIncrementForPart:
764     return true;
765   };
766   llvm_unreachable("switch should return");
767 }
768 
769 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
770 void VPInstruction::dump() const {
771   VPSlotTracker SlotTracker(getParent()->getPlan());
772   print(dbgs(), "", SlotTracker);
773 }
774 
775 void VPInstruction::print(raw_ostream &O, const Twine &Indent,
776                           VPSlotTracker &SlotTracker) const {
777   O << Indent << "EMIT ";
778 
779   if (hasResult()) {
780     printAsOperand(O, SlotTracker);
781     O << " = ";
782   }
783 
784   switch (getOpcode()) {
785   case VPInstruction::Not:
786     O << "not";
787     break;
788   case VPInstruction::SLPLoad:
789     O << "combined load";
790     break;
791   case VPInstruction::SLPStore:
792     O << "combined store";
793     break;
794   case VPInstruction::ActiveLaneMask:
795     O << "active lane mask";
796     break;
797   case VPInstruction::ResumePhi:
798     O << "resume-phi";
799     break;
800   case VPInstruction::ExplicitVectorLength:
801     O << "EXPLICIT-VECTOR-LENGTH";
802     break;
803   case VPInstruction::FirstOrderRecurrenceSplice:
804     O << "first-order splice";
805     break;
806   case VPInstruction::BranchOnCond:
807     O << "branch-on-cond";
808     break;
809   case VPInstruction::CalculateTripCountMinusVF:
810     O << "TC > VF ? TC - VF : 0";
811     break;
812   case VPInstruction::CanonicalIVIncrementForPart:
813     O << "VF * Part +";
814     break;
815   case VPInstruction::BranchOnCount:
816     O << "branch-on-count";
817     break;
818   case VPInstruction::ExtractFromEnd:
819     O << "extract-from-end";
820     break;
821   case VPInstruction::ComputeReductionResult:
822     O << "compute-reduction-result";
823     break;
824   case VPInstruction::LogicalAnd:
825     O << "logical-and";
826     break;
827   case VPInstruction::PtrAdd:
828     O << "ptradd";
829     break;
830   default:
831     O << Instruction::getOpcodeName(getOpcode());
832   }
833 
834   printFlags(O);
835   printOperands(O, SlotTracker);
836 
837   if (auto DL = getDebugLoc()) {
838     O << ", !dbg ";
839     DL.print(O);
840   }
841 }
842 #endif
843 
844 void VPIRInstruction::execute(VPTransformState &State) {
845   assert((isa<PHINode>(&I) || getNumOperands() == 0) &&
846          "Only PHINodes can have extra operands");
847   if (getNumOperands() == 1) {
848     VPValue *ExitValue = getOperand(0);
849     auto Lane = vputils::isUniformAfterVectorization(ExitValue)
850                     ? VPLane::getFirstLane()
851                     : VPLane::getLastLaneForVF(State.VF);
852     auto *PredVPBB = cast<VPBasicBlock>(getParent()->getSinglePredecessor());
853     BasicBlock *PredBB = State.CFG.VPBB2IRBB[PredVPBB];
854     // Set insertion point in PredBB in case an extract needs to be generated.
855     // TODO: Model extracts explicitly.
856     State.Builder.SetInsertPoint(PredBB, PredBB->getFirstNonPHIIt());
857     Value *V = State.get(ExitValue, VPLane(Lane));
858     auto *Phi = cast<PHINode>(&I);
859     Phi->addIncoming(V, PredBB);
860   }
861 
862   // Advance the insert point after the wrapped IR instruction. This allows
863   // interleaving VPIRInstructions and other recipes.
864   State.Builder.SetInsertPoint(I.getParent(), std::next(I.getIterator()));
865 }
866 
867 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
868 void VPIRInstruction::print(raw_ostream &O, const Twine &Indent,
869                             VPSlotTracker &SlotTracker) const {
870   O << Indent << "IR " << I;
871 
872   if (getNumOperands() != 0) {
873     assert(getNumOperands() == 1 && "can have at most 1 operand");
874     O << " (extra operand: ";
875     printOperands(O, SlotTracker);
876     O << ")";
877   }
878 }
879 #endif
880 
881 void VPWidenCallRecipe::execute(VPTransformState &State) {
882   assert(State.VF.isVector() && "not widening");
883   Function *CalledScalarFn = getCalledScalarFunction();
884   assert(!isDbgInfoIntrinsic(CalledScalarFn->getIntrinsicID()) &&
885          "DbgInfoIntrinsic should have been dropped during VPlan construction");
886   State.setDebugLocFrom(getDebugLoc());
887 
888   bool UseIntrinsic = VectorIntrinsicID != Intrinsic::not_intrinsic;
889   FunctionType *VFTy = nullptr;
890   if (Variant)
891     VFTy = Variant->getFunctionType();
892   SmallVector<Type *, 2> TysForDecl;
893   // Add return type if intrinsic is overloaded on it.
894   if (UseIntrinsic &&
895       isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1))
896     TysForDecl.push_back(VectorType::get(
897         CalledScalarFn->getReturnType()->getScalarType(), State.VF));
898   SmallVector<Value *, 4> Args;
899   for (const auto &I : enumerate(arg_operands())) {
900     // Some intrinsics have a scalar argument - don't replace it with a
901     // vector.
902     Value *Arg;
903     if (UseIntrinsic &&
904         isVectorIntrinsicWithScalarOpAtArg(VectorIntrinsicID, I.index()))
905       Arg = State.get(I.value(), VPLane(0));
906     // Some vectorized function variants may also take a scalar argument,
907     // e.g. linear parameters for pointers. This needs to be the scalar value
908     // from the start of the respective part when interleaving.
909     else if (VFTy && !VFTy->getParamType(I.index())->isVectorTy())
910       Arg = State.get(I.value(), VPLane(0));
911     else
912       Arg = State.get(I.value());
913     if (UseIntrinsic &&
914         isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, I.index()))
915       TysForDecl.push_back(Arg->getType());
916     Args.push_back(Arg);
917   }
918 
919   Function *VectorF;
920   if (UseIntrinsic) {
921     // Use vector version of the intrinsic.
922     Module *M = State.Builder.GetInsertBlock()->getModule();
923     VectorF = Intrinsic::getDeclaration(M, VectorIntrinsicID, TysForDecl);
924     assert(VectorF && "Can't retrieve vector intrinsic.");
925   } else {
926 #ifndef NDEBUG
927     assert(Variant != nullptr && "Can't create vector function.");
928 #endif
929     VectorF = Variant;
930   }
931 
932   auto *CI = cast_or_null<CallInst>(getUnderlyingInstr());
933   SmallVector<OperandBundleDef, 1> OpBundles;
934   if (CI)
935     CI->getOperandBundlesAsDefs(OpBundles);
936 
937   CallInst *V = State.Builder.CreateCall(VectorF, Args, OpBundles);
938 
939   setFlags(V);
940 
941   if (!V->getType()->isVoidTy())
942     State.set(this, V);
943   State.addMetadata(V, CI);
944 }
945 
946 InstructionCost VPWidenCallRecipe::computeCost(ElementCount VF,
947                                                VPCostContext &Ctx) const {
948   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
949   if (Variant) {
950     return Ctx.TTI.getCallInstrCost(nullptr, Variant->getReturnType(),
951                                     Variant->getFunctionType()->params(),
952                                     CostKind);
953   }
954 
955   // Some backends analyze intrinsic arguments to determine cost. Use the
956   // underlying value for the operand if it has one. Otherwise try to use the
957   // operand of the underlying call instruction, if there is one. Otherwise
958   // clear Arguments.
959   // TODO: Rework TTI interface to be independent of concrete IR values.
960   SmallVector<const Value *> Arguments;
961   for (const auto &[Idx, Op] : enumerate(operands())) {
962     auto *V = Op->getUnderlyingValue();
963     if (!V) {
964       if (auto *UI = dyn_cast_or_null<CallBase>(getUnderlyingValue())) {
965         Arguments.push_back(UI->getArgOperand(Idx));
966         continue;
967       }
968       Arguments.clear();
969       break;
970     }
971     Arguments.push_back(V);
972   }
973 
974   Type *RetTy =
975       ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
976   SmallVector<Type *> ParamTys;
977   for (unsigned I = 0; I != getNumOperands(); ++I)
978     ParamTys.push_back(
979         ToVectorTy(Ctx.Types.inferScalarType(getOperand(I)), VF));
980 
981   // TODO: Rework TTI interface to avoid reliance on underlying IntrinsicInst.
982   FastMathFlags FMF = hasFastMathFlags() ? getFastMathFlags() : FastMathFlags();
983   IntrinsicCostAttributes CostAttrs(
984       VectorIntrinsicID, RetTy, Arguments, ParamTys, FMF,
985       dyn_cast_or_null<IntrinsicInst>(getUnderlyingValue()));
986   return Ctx.TTI.getIntrinsicInstrCost(CostAttrs, CostKind);
987 }
988 
989 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
990 void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
991                               VPSlotTracker &SlotTracker) const {
992   O << Indent << "WIDEN-CALL ";
993 
994   Function *CalledFn = getCalledScalarFunction();
995   if (CalledFn->getReturnType()->isVoidTy())
996     O << "void ";
997   else {
998     printAsOperand(O, SlotTracker);
999     O << " = ";
1000   }
1001 
1002   O << "call";
1003   printFlags(O);
1004   O << "  @" << CalledFn->getName() << "(";
1005   interleaveComma(arg_operands(), O, [&O, &SlotTracker](VPValue *Op) {
1006     Op->printAsOperand(O, SlotTracker);
1007   });
1008   O << ")";
1009 
1010   if (VectorIntrinsicID)
1011     O << " (using vector intrinsic)";
1012   else {
1013     O << " (using library function";
1014     if (Variant->hasName())
1015       O << ": " << Variant->getName();
1016     O << ")";
1017   }
1018 }
1019 #endif
1020 
1021 void VPHistogramRecipe::execute(VPTransformState &State) {
1022   State.setDebugLocFrom(getDebugLoc());
1023   IRBuilderBase &Builder = State.Builder;
1024 
1025   Value *Address = State.get(getOperand(0));
1026   Value *IncAmt = State.get(getOperand(1), /*IsScalar=*/true);
1027   VectorType *VTy = cast<VectorType>(Address->getType());
1028 
1029   // The histogram intrinsic requires a mask even if the recipe doesn't;
1030   // if the mask operand was omitted then all lanes should be executed and
1031   // we just need to synthesize an all-true mask.
1032   Value *Mask = nullptr;
1033   if (VPValue *VPMask = getMask())
1034     Mask = State.get(VPMask);
1035   else
1036     Mask =
1037         Builder.CreateVectorSplat(VTy->getElementCount(), Builder.getInt1(1));
1038 
1039   // If this is a subtract, we want to invert the increment amount. We may
1040   // add a separate intrinsic in future, but for now we'll try this.
1041   if (Opcode == Instruction::Sub)
1042     IncAmt = Builder.CreateNeg(IncAmt);
1043   else
1044     assert(Opcode == Instruction::Add && "only add or sub supported for now");
1045 
1046   State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
1047                                 {VTy, IncAmt->getType()},
1048                                 {Address, IncAmt, Mask});
1049 }
1050 
1051 InstructionCost VPHistogramRecipe::computeCost(ElementCount VF,
1052                                                VPCostContext &Ctx) const {
1053   // FIXME: Take the gather and scatter into account as well. For now we're
1054   //        generating the same cost as the fallback path, but we'll likely
1055   //        need to create a new TTI method for determining the cost, including
1056   //        whether we can use base + vec-of-smaller-indices or just
1057   //        vec-of-pointers.
1058   assert(VF.isVector() && "Invalid VF for histogram cost");
1059   Type *AddressTy = Ctx.Types.inferScalarType(getOperand(0));
1060   VPValue *IncAmt = getOperand(1);
1061   Type *IncTy = Ctx.Types.inferScalarType(IncAmt);
1062   VectorType *VTy = VectorType::get(IncTy, VF);
1063 
1064   // Assume that a non-constant update value (or a constant != 1) requires
1065   // a multiply, and add that into the cost.
1066   InstructionCost MulCost =
1067       Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VTy);
1068   if (IncAmt->isLiveIn()) {
1069     ConstantInt *CI = dyn_cast<ConstantInt>(IncAmt->getLiveInIRValue());
1070 
1071     if (CI && CI->getZExtValue() == 1)
1072       MulCost = TTI::TCC_Free;
1073   }
1074 
1075   // Find the cost of the histogram operation itself.
1076   Type *PtrTy = VectorType::get(AddressTy, VF);
1077   Type *MaskTy = VectorType::get(Type::getInt1Ty(Ctx.LLVMCtx), VF);
1078   IntrinsicCostAttributes ICA(Intrinsic::experimental_vector_histogram_add,
1079                               Type::getVoidTy(Ctx.LLVMCtx),
1080                               {PtrTy, IncTy, MaskTy});
1081 
1082   // Add the costs together with the add/sub operation.
1083   return Ctx.TTI.getIntrinsicInstrCost(
1084              ICA, TargetTransformInfo::TCK_RecipThroughput) +
1085          MulCost + Ctx.TTI.getArithmeticInstrCost(Opcode, VTy);
1086 }
1087 
1088 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1089 void VPHistogramRecipe::print(raw_ostream &O, const Twine &Indent,
1090                               VPSlotTracker &SlotTracker) const {
1091   O << Indent << "WIDEN-HISTOGRAM buckets: ";
1092   getOperand(0)->printAsOperand(O, SlotTracker);
1093 
1094   if (Opcode == Instruction::Sub)
1095     O << ", dec: ";
1096   else {
1097     assert(Opcode == Instruction::Add);
1098     O << ", inc: ";
1099   }
1100   getOperand(1)->printAsOperand(O, SlotTracker);
1101 
1102   if (VPValue *Mask = getMask()) {
1103     O << ", mask: ";
1104     Mask->printAsOperand(O, SlotTracker);
1105   }
1106 }
1107 
1108 void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
1109                                 VPSlotTracker &SlotTracker) const {
1110   O << Indent << "WIDEN-SELECT ";
1111   printAsOperand(O, SlotTracker);
1112   O << " = select ";
1113   getOperand(0)->printAsOperand(O, SlotTracker);
1114   O << ", ";
1115   getOperand(1)->printAsOperand(O, SlotTracker);
1116   O << ", ";
1117   getOperand(2)->printAsOperand(O, SlotTracker);
1118   O << (isInvariantCond() ? " (condition is loop invariant)" : "");
1119 }
1120 #endif
1121 
1122 void VPWidenSelectRecipe::execute(VPTransformState &State) {
1123   State.setDebugLocFrom(getDebugLoc());
1124 
1125   // The condition can be loop invariant but still defined inside the
1126   // loop. This means that we can't just use the original 'cond' value.
1127   // We have to take the 'vectorized' value and pick the first lane.
1128   // Instcombine will make this a no-op.
1129   auto *InvarCond =
1130       isInvariantCond() ? State.get(getCond(), VPLane(0)) : nullptr;
1131 
1132   Value *Cond = InvarCond ? InvarCond : State.get(getCond());
1133   Value *Op0 = State.get(getOperand(1));
1134   Value *Op1 = State.get(getOperand(2));
1135   Value *Sel = State.Builder.CreateSelect(Cond, Op0, Op1);
1136   State.set(this, Sel);
1137   State.addMetadata(Sel, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1138 }
1139 
1140 VPRecipeWithIRFlags::FastMathFlagsTy::FastMathFlagsTy(
1141     const FastMathFlags &FMF) {
1142   AllowReassoc = FMF.allowReassoc();
1143   NoNaNs = FMF.noNaNs();
1144   NoInfs = FMF.noInfs();
1145   NoSignedZeros = FMF.noSignedZeros();
1146   AllowReciprocal = FMF.allowReciprocal();
1147   AllowContract = FMF.allowContract();
1148   ApproxFunc = FMF.approxFunc();
1149 }
1150 
1151 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1152 void VPRecipeWithIRFlags::printFlags(raw_ostream &O) const {
1153   switch (OpType) {
1154   case OperationType::Cmp:
1155     O << " " << CmpInst::getPredicateName(getPredicate());
1156     break;
1157   case OperationType::DisjointOp:
1158     if (DisjointFlags.IsDisjoint)
1159       O << " disjoint";
1160     break;
1161   case OperationType::PossiblyExactOp:
1162     if (ExactFlags.IsExact)
1163       O << " exact";
1164     break;
1165   case OperationType::OverflowingBinOp:
1166     if (WrapFlags.HasNUW)
1167       O << " nuw";
1168     if (WrapFlags.HasNSW)
1169       O << " nsw";
1170     break;
1171   case OperationType::FPMathOp:
1172     getFastMathFlags().print(O);
1173     break;
1174   case OperationType::GEPOp:
1175     if (GEPFlags.IsInBounds)
1176       O << " inbounds";
1177     break;
1178   case OperationType::NonNegOp:
1179     if (NonNegFlags.NonNeg)
1180       O << " nneg";
1181     break;
1182   case OperationType::Other:
1183     break;
1184   }
1185   if (getNumOperands() > 0)
1186     O << " ";
1187 }
1188 #endif
1189 
1190 void VPWidenRecipe::execute(VPTransformState &State) {
1191   State.setDebugLocFrom(getDebugLoc());
1192   auto &Builder = State.Builder;
1193   switch (Opcode) {
1194   case Instruction::Call:
1195   case Instruction::Br:
1196   case Instruction::PHI:
1197   case Instruction::GetElementPtr:
1198   case Instruction::Select:
1199     llvm_unreachable("This instruction is handled by a different recipe.");
1200   case Instruction::UDiv:
1201   case Instruction::SDiv:
1202   case Instruction::SRem:
1203   case Instruction::URem:
1204   case Instruction::Add:
1205   case Instruction::FAdd:
1206   case Instruction::Sub:
1207   case Instruction::FSub:
1208   case Instruction::FNeg:
1209   case Instruction::Mul:
1210   case Instruction::FMul:
1211   case Instruction::FDiv:
1212   case Instruction::FRem:
1213   case Instruction::Shl:
1214   case Instruction::LShr:
1215   case Instruction::AShr:
1216   case Instruction::And:
1217   case Instruction::Or:
1218   case Instruction::Xor: {
1219     // Just widen unops and binops.
1220     SmallVector<Value *, 2> Ops;
1221     for (VPValue *VPOp : operands())
1222       Ops.push_back(State.get(VPOp));
1223 
1224     Value *V = Builder.CreateNAryOp(Opcode, Ops);
1225 
1226     if (auto *VecOp = dyn_cast<Instruction>(V))
1227       setFlags(VecOp);
1228 
1229     // Use this vector value for all users of the original instruction.
1230     State.set(this, V);
1231     State.addMetadata(V, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1232     break;
1233   }
1234   case Instruction::Freeze: {
1235     Value *Op = State.get(getOperand(0));
1236 
1237     Value *Freeze = Builder.CreateFreeze(Op);
1238     State.set(this, Freeze);
1239     break;
1240   }
1241   case Instruction::ICmp:
1242   case Instruction::FCmp: {
1243     // Widen compares. Generate vector compares.
1244     bool FCmp = Opcode == Instruction::FCmp;
1245     Value *A = State.get(getOperand(0));
1246     Value *B = State.get(getOperand(1));
1247     Value *C = nullptr;
1248     if (FCmp) {
1249       // Propagate fast math flags.
1250       IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1251       if (auto *I = dyn_cast_or_null<Instruction>(getUnderlyingValue()))
1252         Builder.setFastMathFlags(I->getFastMathFlags());
1253       C = Builder.CreateFCmp(getPredicate(), A, B);
1254     } else {
1255       C = Builder.CreateICmp(getPredicate(), A, B);
1256     }
1257     State.set(this, C);
1258     State.addMetadata(C, dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1259     break;
1260   }
1261   default:
1262     // This instruction is not vectorized by simple widening.
1263     LLVM_DEBUG(dbgs() << "LV: Found an unhandled opcode : "
1264                       << Instruction::getOpcodeName(Opcode));
1265     llvm_unreachable("Unhandled instruction!");
1266   } // end of switch.
1267 
1268 #if !defined(NDEBUG)
1269   // Verify that VPlan type inference results agree with the type of the
1270   // generated values.
1271   assert(VectorType::get(State.TypeAnalysis.inferScalarType(this), State.VF) ==
1272              State.get(this)->getType() &&
1273          "inferred type and type from generated instructions do not match");
1274 #endif
1275 }
1276 
1277 InstructionCost VPWidenRecipe::computeCost(ElementCount VF,
1278                                            VPCostContext &Ctx) const {
1279   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1280   switch (Opcode) {
1281   case Instruction::FNeg: {
1282     Type *VectorTy =
1283         ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
1284     return Ctx.TTI.getArithmeticInstrCost(
1285         Opcode, VectorTy, CostKind,
1286         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1287         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None});
1288   }
1289 
1290   case Instruction::UDiv:
1291   case Instruction::SDiv:
1292   case Instruction::SRem:
1293   case Instruction::URem:
1294     // More complex computation, let the legacy cost-model handle this for now.
1295     return Ctx.getLegacyCost(cast<Instruction>(getUnderlyingValue()), VF);
1296   case Instruction::Add:
1297   case Instruction::FAdd:
1298   case Instruction::Sub:
1299   case Instruction::FSub:
1300   case Instruction::Mul:
1301   case Instruction::FMul:
1302   case Instruction::FDiv:
1303   case Instruction::FRem:
1304   case Instruction::Shl:
1305   case Instruction::LShr:
1306   case Instruction::AShr:
1307   case Instruction::And:
1308   case Instruction::Or:
1309   case Instruction::Xor: {
1310     VPValue *RHS = getOperand(1);
1311     // Certain instructions can be cheaper to vectorize if they have a constant
1312     // second vector operand. One example of this are shifts on x86.
1313     TargetTransformInfo::OperandValueInfo RHSInfo = {
1314         TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None};
1315     if (RHS->isLiveIn())
1316       RHSInfo = Ctx.TTI.getOperandInfo(RHS->getLiveInIRValue());
1317 
1318     if (RHSInfo.Kind == TargetTransformInfo::OK_AnyValue &&
1319         getOperand(1)->isDefinedOutsideLoopRegions())
1320       RHSInfo.Kind = TargetTransformInfo::OK_UniformValue;
1321     Type *VectorTy =
1322         ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
1323     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
1324 
1325     SmallVector<const Value *, 4> Operands;
1326     if (CtxI)
1327       Operands.append(CtxI->value_op_begin(), CtxI->value_op_end());
1328     return Ctx.TTI.getArithmeticInstrCost(
1329         Opcode, VectorTy, CostKind,
1330         {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
1331         RHSInfo, Operands, CtxI, &Ctx.TLI);
1332   }
1333   case Instruction::Freeze: {
1334     // This opcode is unknown. Assume that it is the same as 'mul'.
1335     Type *VectorTy =
1336         ToVectorTy(Ctx.Types.inferScalarType(this->getVPSingleValue()), VF);
1337     return Ctx.TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy, CostKind);
1338   }
1339   case Instruction::ICmp:
1340   case Instruction::FCmp: {
1341     Instruction *CtxI = dyn_cast_or_null<Instruction>(getUnderlyingValue());
1342     Type *VectorTy = ToVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
1343     return Ctx.TTI.getCmpSelInstrCost(Opcode, VectorTy, nullptr, getPredicate(),
1344                                       CostKind,
1345                                       {TTI::OK_AnyValue, TTI::OP_None},
1346                                       {TTI::OK_AnyValue, TTI::OP_None}, CtxI);
1347   }
1348   default:
1349     llvm_unreachable("Unsupported opcode for instruction");
1350   }
1351 }
1352 
1353 void VPWidenEVLRecipe::execute(VPTransformState &State) {
1354   unsigned Opcode = getOpcode();
1355   // TODO: Support other opcodes
1356   if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
1357     llvm_unreachable("Unsupported opcode in VPWidenEVLRecipe::execute");
1358 
1359   State.setDebugLocFrom(getDebugLoc());
1360 
1361   assert(State.get(getOperand(0))->getType()->isVectorTy() &&
1362          "VPWidenEVLRecipe should not be used for scalars");
1363 
1364   VPValue *EVL = getEVL();
1365   Value *EVLArg = State.get(EVL, /*NeedsScalar=*/true);
1366   IRBuilderBase &BuilderIR = State.Builder;
1367   VectorBuilder Builder(BuilderIR);
1368   Value *Mask = BuilderIR.CreateVectorSplat(State.VF, BuilderIR.getTrue());
1369 
1370   SmallVector<Value *, 4> Ops;
1371   for (unsigned I = 0, E = getNumOperands() - 1; I < E; ++I) {
1372     VPValue *VPOp = getOperand(I);
1373     Ops.push_back(State.get(VPOp));
1374   }
1375 
1376   Builder.setMask(Mask).setEVL(EVLArg);
1377   Value *VPInst =
1378       Builder.createVectorInstruction(Opcode, Ops[0]->getType(), Ops, "vp.op");
1379   // Currently vp-intrinsics only accept FMF flags.
1380   // TODO: Enable other flags when support is added.
1381   if (isa<FPMathOperator>(VPInst))
1382     setFlags(cast<Instruction>(VPInst));
1383 
1384   State.set(this, VPInst);
1385   State.addMetadata(VPInst,
1386                     dyn_cast_or_null<Instruction>(getUnderlyingValue()));
1387 }
1388 
1389 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1390 void VPWidenRecipe::print(raw_ostream &O, const Twine &Indent,
1391                           VPSlotTracker &SlotTracker) const {
1392   O << Indent << "WIDEN ";
1393   printAsOperand(O, SlotTracker);
1394   O << " = " << Instruction::getOpcodeName(Opcode);
1395   printFlags(O);
1396   printOperands(O, SlotTracker);
1397 }
1398 
1399 void VPWidenEVLRecipe::print(raw_ostream &O, const Twine &Indent,
1400                              VPSlotTracker &SlotTracker) const {
1401   O << Indent << "WIDEN ";
1402   printAsOperand(O, SlotTracker);
1403   O << " = vp." << Instruction::getOpcodeName(getOpcode());
1404   printFlags(O);
1405   printOperands(O, SlotTracker);
1406 }
1407 #endif
1408 
1409 void VPWidenCastRecipe::execute(VPTransformState &State) {
1410   State.setDebugLocFrom(getDebugLoc());
1411   auto &Builder = State.Builder;
1412   /// Vectorize casts.
1413   assert(State.VF.isVector() && "Not vectorizing?");
1414   Type *DestTy = VectorType::get(getResultType(), State.VF);
1415   VPValue *Op = getOperand(0);
1416   Value *A = State.get(Op);
1417   Value *Cast = Builder.CreateCast(Instruction::CastOps(Opcode), A, DestTy);
1418   State.set(this, Cast);
1419   State.addMetadata(Cast, cast_or_null<Instruction>(getUnderlyingValue()));
1420 }
1421 
1422 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1423 void VPWidenCastRecipe::print(raw_ostream &O, const Twine &Indent,
1424                               VPSlotTracker &SlotTracker) const {
1425   O << Indent << "WIDEN-CAST ";
1426   printAsOperand(O, SlotTracker);
1427   O << " = " << Instruction::getOpcodeName(Opcode) << " ";
1428   printFlags(O);
1429   printOperands(O, SlotTracker);
1430   O << " to " << *getResultType();
1431 }
1432 #endif
1433 
1434 /// This function adds
1435 /// (StartIdx * Step, (StartIdx + 1) * Step, (StartIdx + 2) * Step, ...)
1436 /// to each vector element of Val. The sequence starts at StartIdx.
1437 /// \p BinOp is only relevant for FP induction variables.
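/// For example (a hedged sketch, not taken from the source): for an integer
/// induction with Val = splat(%start), StartIdx = 0, Step = %step and VF = 4,
/// the generated sequence is roughly:
///   %stepvec   = <0, 1, 2, 3>
///   %scaled    = mul <4 x iN> %stepvec, splat(%step)
///   %induction = add <4 x iN> splat(%start), %scaled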
1438 static Value *getStepVector(Value *Val, Value *StartIdx, Value *Step,
1439                             Instruction::BinaryOps BinOp, ElementCount VF,
1440                             IRBuilderBase &Builder) {
1441   assert(VF.isVector() && "only vector VFs are supported");
1442 
1443   // Create and check the types.
1444   auto *ValVTy = cast<VectorType>(Val->getType());
1445   ElementCount VLen = ValVTy->getElementCount();
1446 
1447   Type *STy = Val->getType()->getScalarType();
1448   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
1449          "Induction Step must be an integer or FP");
1450   assert(Step->getType() == STy && "Step has wrong type");
1451 
1452   SmallVector<Constant *, 8> Indices;
1453 
1454   // Create a vector of consecutive numbers from zero to VF - 1.
1455   VectorType *InitVecValVTy = ValVTy;
1456   if (STy->isFloatingPointTy()) {
1457     Type *InitVecValSTy =
1458         IntegerType::get(STy->getContext(), STy->getScalarSizeInBits());
1459     InitVecValVTy = VectorType::get(InitVecValSTy, VLen);
1460   }
1461   Value *InitVec = Builder.CreateStepVector(InitVecValVTy);
1462 
1463   // Splat the StartIdx
1464   Value *StartIdxSplat = Builder.CreateVectorSplat(VLen, StartIdx);
1465 
1466   if (STy->isIntegerTy()) {
1467     InitVec = Builder.CreateAdd(InitVec, StartIdxSplat);
1468     Step = Builder.CreateVectorSplat(VLen, Step);
1469     assert(Step->getType() == Val->getType() && "Invalid step vec");
1470     // FIXME: The newly created binary instructions should contain nsw/nuw
1471     // flags, which can be found from the original scalar operations.
1472     Step = Builder.CreateMul(InitVec, Step);
1473     return Builder.CreateAdd(Val, Step, "induction");
1474   }
1475 
1476   // Floating point induction.
1477   assert((BinOp == Instruction::FAdd || BinOp == Instruction::FSub) &&
1478          "Binary Opcode should be specified for FP induction");
1479   InitVec = Builder.CreateUIToFP(InitVec, ValVTy);
1480   InitVec = Builder.CreateFAdd(InitVec, StartIdxSplat);
1481 
1482   Step = Builder.CreateVectorSplat(VLen, Step);
1483   Value *MulOp = Builder.CreateFMul(InitVec, Step);
1484   return Builder.CreateBinOp(BinOp, Val, MulOp, "induction");
1485 }
1486 
1487 /// A helper function that returns an integer or floating-point constant with
1488 /// value C.
1489 static Constant *getSignedIntOrFpConstant(Type *Ty, int64_t C) {
1490   return Ty->isIntegerTy() ? ConstantInt::getSigned(Ty, C)
1491                            : ConstantFP::get(Ty, C);
1492 }
1493 
1494 void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) {
1495   assert(!State.Lane && "Int or FP induction being replicated.");
1496 
1497   Value *Start = getStartValue()->getLiveInIRValue();
1498   const InductionDescriptor &ID = getInductionDescriptor();
1499   TruncInst *Trunc = getTruncInst();
1500   IRBuilderBase &Builder = State.Builder;
1501   assert(IV->getType() == ID.getStartValue()->getType() && "Types must match");
1502   assert(State.VF.isVector() && "must have vector VF");
1503 
1504   // The value from the original loop to which we are mapping the new induction
1505   // variable.
1506   Instruction *EntryVal = Trunc ? cast<Instruction>(Trunc) : IV;
1507 
1508   // Fast-math-flags propagate from the original induction instruction.
1509   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1510   if (ID.getInductionBinOp() && isa<FPMathOperator>(ID.getInductionBinOp()))
1511     Builder.setFastMathFlags(ID.getInductionBinOp()->getFastMathFlags());
1512 
1513   // Now do the actual transformations, and start with fetching the step value.
1514   Value *Step = State.get(getStepValue(), VPLane(0));
1515 
1516   assert((isa<PHINode>(EntryVal) || isa<TruncInst>(EntryVal)) &&
1517          "Expected either an induction phi-node or a truncate of it!");
1518 
1519   // Construct the initial value of the vector IV in the vector loop preheader
1520   auto CurrIP = Builder.saveIP();
1521   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
1522   Builder.SetInsertPoint(VectorPH->getTerminator());
1523   if (isa<TruncInst>(EntryVal)) {
1524     assert(Start->getType()->isIntegerTy() &&
1525            "Truncation requires an integer type");
1526     auto *TruncType = cast<IntegerType>(EntryVal->getType());
1527     Step = Builder.CreateTrunc(Step, TruncType);
1528     Start = Builder.CreateCast(Instruction::Trunc, Start, TruncType);
1529   }
1530 
1531   Value *Zero = getSignedIntOrFpConstant(Start->getType(), 0);
1532   Value *SplatStart = Builder.CreateVectorSplat(State.VF, Start);
1533   Value *SteppedStart = getStepVector(
1534       SplatStart, Zero, Step, ID.getInductionOpcode(), State.VF, State.Builder);
1535 
1536   // We create vector phi nodes for both integer and floating-point induction
1537   // variables. Here, we determine the kind of arithmetic we will perform.
1538   Instruction::BinaryOps AddOp;
1539   Instruction::BinaryOps MulOp;
1540   if (Step->getType()->isIntegerTy()) {
1541     AddOp = Instruction::Add;
1542     MulOp = Instruction::Mul;
1543   } else {
1544     AddOp = ID.getInductionOpcode();
1545     MulOp = Instruction::FMul;
1546   }
1547 
1548   Value *SplatVF;
1549   if (VPValue *SplatVFOperand = getSplatVFValue()) {
1550     // The recipe has been unrolled. In that case, fetch the splat value for the
1551     // induction increment.
1552     SplatVF = State.get(SplatVFOperand);
1553   } else {
1554     // Multiply the vectorization factor by the step using integer or
1555     // floating-point arithmetic as appropriate.
1556     Type *StepType = Step->getType();
1557     Value *RuntimeVF = State.get(getVFValue(), VPLane(0));
1558     if (Step->getType()->isFloatingPointTy())
1559       RuntimeVF = Builder.CreateUIToFP(RuntimeVF, StepType);
1560     else
1561       RuntimeVF = Builder.CreateZExtOrTrunc(RuntimeVF, StepType);
1562     Value *Mul = Builder.CreateBinOp(MulOp, Step, RuntimeVF);
1563 
1564     // Create a vector splat to use in the induction update.
1565     //
1566     // FIXME: If the step is non-constant, we create the vector splat with
1567     //        IRBuilder. IRBuilder can constant-fold the multiply, but it
1568     //        doesn't handle a constant vector splat.
1569     SplatVF = isa<Constant>(Mul)
1570                   ? ConstantVector::getSplat(State.VF, cast<Constant>(Mul))
1571                   : Builder.CreateVectorSplat(State.VF, Mul);
1572   }
1573 
1574   Builder.restoreIP(CurrIP);
1575 
1576   // We may need to add the step a number of times, depending on the unroll
1577   // factor. The last of those goes into the PHI.
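  // A hedged sketch of the emitted IR for an integer IV with VF = 4, where the
  // increment per vector iteration is VF * Step (names are illustrative only):
  //   vector.body:
  //     %vec.ind      = phi <4 x i32> [ %stepped.start, %vector.ph ],
  //                                   [ %vec.ind.next, ... ]
  //     %vec.ind.next = add <4 x i32> %vec.ind, %splat.vf.step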
1578   PHINode *VecInd = PHINode::Create(SteppedStart->getType(), 2, "vec.ind");
1579   VecInd->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
1580   VecInd->setDebugLoc(EntryVal->getDebugLoc());
1581   State.set(this, VecInd);
1582 
1583   Instruction *LastInduction = cast<Instruction>(
1584       Builder.CreateBinOp(AddOp, VecInd, SplatVF, "vec.ind.next"));
1585   if (isa<TruncInst>(EntryVal))
1586     State.addMetadata(LastInduction, EntryVal);
1587   LastInduction->setDebugLoc(EntryVal->getDebugLoc());
1588 
1589   VecInd->addIncoming(SteppedStart, VectorPH);
1590   // Add induction update using an incorrect block temporarily. The phi node
1591   // will be fixed after VPlan execution. Note that at this point the latch
1592   // block cannot be used, as it does not exist yet.
1593   // TODO: Model increment value in VPlan, by turning the recipe into a
1594   // multi-def and a subclass of VPHeaderPHIRecipe.
1595   VecInd->addIncoming(LastInduction, VectorPH);
1596 }
1597 
1598 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1599 void VPWidenIntOrFpInductionRecipe::print(raw_ostream &O, const Twine &Indent,
1600                                           VPSlotTracker &SlotTracker) const {
1601   O << Indent << "WIDEN-INDUCTION";
1602   if (getTruncInst()) {
1603     O << "\\l\"";
1604     O << " +\n" << Indent << "\"  " << VPlanIngredient(IV) << "\\l\"";
1605     O << " +\n" << Indent << "\"  ";
1606     getVPValue(0)->printAsOperand(O, SlotTracker);
1607   } else
1608     O << " " << VPlanIngredient(IV);
1609 
1610   O << ", ";
1611   getStepValue()->printAsOperand(O, SlotTracker);
1612 
1613   O << ", ";
1614   getVFValue()->printAsOperand(O, SlotTracker);
1615 }
1616 #endif
1617 
1618 bool VPWidenIntOrFpInductionRecipe::isCanonical() const {
1619   // The step may be defined by a recipe in the preheader (e.g. if it requires
1620   // SCEV expansion), but for the canonical induction the step is required to be
1621   // 1, which is represented as live-in.
1622   if (getStepValue()->getDefiningRecipe())
1623     return false;
1624   auto *StepC = dyn_cast<ConstantInt>(getStepValue()->getLiveInIRValue());
1625   auto *StartC = dyn_cast<ConstantInt>(getStartValue()->getLiveInIRValue());
1626   auto *CanIV = cast<VPCanonicalIVPHIRecipe>(&*getParent()->begin());
1627   return StartC && StartC->isZero() && StepC && StepC->isOne() &&
1628          getScalarType() == CanIV->getScalarType();
1629 }
1630 
1631 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1632 void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
1633                               VPSlotTracker &SlotTracker) const {
1634   O << Indent;
1635   printAsOperand(O, SlotTracker);
1636   O << " = DERIVED-IV ";
1637   getStartValue()->printAsOperand(O, SlotTracker);
1638   O << " + ";
1639   getOperand(1)->printAsOperand(O, SlotTracker);
1640   O << " * ";
1641   getStepValue()->printAsOperand(O, SlotTracker);
1642 }
1643 #endif
1644 
1645 void VPScalarIVStepsRecipe::execute(VPTransformState &State) {
1646   // Fast-math-flags propagate from the original induction instruction.
1647   IRBuilder<>::FastMathFlagGuard FMFG(State.Builder);
1648   if (hasFastMathFlags())
1649     State.Builder.setFastMathFlags(getFastMathFlags());
1650 
1651   // Compute scalar induction steps. BaseIV is the scalar induction
1652   // variable on which to base the steps, and Step is the size of the step.
1653 
1654   Value *BaseIV = State.get(getOperand(0), VPLane(0));
1655   Value *Step = State.get(getStepValue(), VPLane(0));
1656   IRBuilderBase &Builder = State.Builder;
1657 
1658   // Ensure step has the same type as that of scalar IV.
1659   Type *BaseIVTy = BaseIV->getType()->getScalarType();
1660   assert(BaseIVTy == Step->getType() && "Types of BaseIV and Step must match!");
1661 
1662   // We build scalar steps for both integer and floating-point induction
1663   // variables. Here, we determine the kind of arithmetic we will perform.
1664   Instruction::BinaryOps AddOp;
1665   Instruction::BinaryOps MulOp;
1666   if (BaseIVTy->isIntegerTy()) {
1667     AddOp = Instruction::Add;
1668     MulOp = Instruction::Mul;
1669   } else {
1670     AddOp = InductionOpcode;
1671     MulOp = Instruction::FMul;
1672   }
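  // A hedged sketch of the per-lane values computed below for an integer IV:
  // with unroll part P, lane L receives BaseIV + (P * VF + L) * Step, where
  // P * VF is materialized as StartIdx0 by createStepForVF().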
1673 
1674   // Determine the number of scalars we need to generate for each unroll
1675   // iteration.
1676   bool FirstLaneOnly = vputils::onlyFirstLaneUsed(this);
1677   // Compute the scalar steps and save the results in State.
1678   Type *IntStepTy =
1679       IntegerType::get(BaseIVTy->getContext(), BaseIVTy->getScalarSizeInBits());
1680   Type *VecIVTy = nullptr;
1681   Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
1682   if (!FirstLaneOnly && State.VF.isScalable()) {
1683     VecIVTy = VectorType::get(BaseIVTy, State.VF);
1684     UnitStepVec =
1685         Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
1686     SplatStep = Builder.CreateVectorSplat(State.VF, Step);
1687     SplatIV = Builder.CreateVectorSplat(State.VF, BaseIV);
1688   }
1689 
1690   unsigned StartLane = 0;
1691   unsigned EndLane = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
1692   if (State.Lane) {
1693     StartLane = State.Lane->getKnownLane();
1694     EndLane = StartLane + 1;
1695   }
1696   Value *StartIdx0 =
1697       createStepForVF(Builder, IntStepTy, State.VF, getUnrollPart(*this));
1698 
1699   if (!FirstLaneOnly && State.VF.isScalable()) {
1700     auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
1701     auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
1702     if (BaseIVTy->isFloatingPointTy())
1703       InitVec = Builder.CreateSIToFP(InitVec, VecIVTy);
1704     auto *Mul = Builder.CreateBinOp(MulOp, InitVec, SplatStep);
1705     auto *Add = Builder.CreateBinOp(AddOp, SplatIV, Mul);
1706     State.set(this, Add);
1707     // It is also useful to record the per-lane values for the known minimum
1708     // number of elements, so we do that below. This improves the code quality
1709     // when extracting the first element, for example.
1710   }
1711 
1712   if (BaseIVTy->isFloatingPointTy())
1713     StartIdx0 = Builder.CreateSIToFP(StartIdx0, BaseIVTy);
1714 
1715   for (unsigned Lane = StartLane; Lane < EndLane; ++Lane) {
1716     Value *StartIdx = Builder.CreateBinOp(
1717         AddOp, StartIdx0, getSignedIntOrFpConstant(BaseIVTy, Lane));
1718     // The step returned by `createStepForVF` is a runtime-evaluated value
1719     // when VF is scalable. Otherwise, it should be folded into a Constant.
1720     assert((State.VF.isScalable() || isa<Constant>(StartIdx)) &&
1721            "Expected StartIdx to be folded to a constant when VF is not "
1722            "scalable");
1723     auto *Mul = Builder.CreateBinOp(MulOp, StartIdx, Step);
1724     auto *Add = Builder.CreateBinOp(AddOp, BaseIV, Mul);
1725     State.set(this, Add, VPLane(Lane));
1726   }
1727 }
1728 
1729 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1730 void VPScalarIVStepsRecipe::print(raw_ostream &O, const Twine &Indent,
1731                                   VPSlotTracker &SlotTracker) const {
1732   O << Indent;
1733   printAsOperand(O, SlotTracker);
1734   O << " = SCALAR-STEPS ";
1735   printOperands(O, SlotTracker);
1736 }
1737 #endif
1738 
1739 void VPWidenGEPRecipe::execute(VPTransformState &State) {
1740   assert(State.VF.isVector() && "not widening");
1741   auto *GEP = cast<GetElementPtrInst>(getUnderlyingInstr());
1742   // Construct a vector GEP by widening the operands of the scalar GEP as
1743   // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP
1744   // results in a vector of pointers when at least one operand of the GEP
1745   // is vector-typed. Thus, to keep the representation compact, we only use
1746   // vector-typed operands for loop-varying values.
1747 
1748   if (areAllOperandsInvariant()) {
1749     // If we are vectorizing, but the GEP has only loop-invariant operands,
1750     // the GEP we build (by only using vector-typed operands for
1751     // loop-varying values) would be a scalar pointer. Thus, to ensure we
1752     // produce a vector of pointers, we need to either arbitrarily pick an
1753     // operand to broadcast, or broadcast a clone of the original GEP.
1754     // Here, we broadcast a clone of the original.
1755     //
1756     // TODO: If at some point we decide to scalarize instructions having
1757     //       loop-invariant operands, this special case will no longer be
1758     //       required. We would add the scalarization decision to
1759     //       collectLoopScalars() and teach getVectorValue() to broadcast
1760     //       the lane-zero scalar value.
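    // A hedged sketch for the all-invariant case with VF = 4 (illustrative
    // names): the scalar `%g = getelementptr i32, ptr %base, i64 %idx` is
    // re-created once and then broadcast:
    //   %g.splatinsert = insertelement <4 x ptr> poison, ptr %g, i64 0
    //   %g.splat       = shufflevector <4 x ptr> %g.splatinsert, poison,
    //                                  <4 x i32> zeroinitializer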
1761     SmallVector<Value *> Ops;
1762     for (unsigned I = 0, E = getNumOperands(); I != E; I++)
1763       Ops.push_back(State.get(getOperand(I), VPLane(0)));
1764 
1765     auto *NewGEP =
1766         State.Builder.CreateGEP(GEP->getSourceElementType(), Ops[0],
1767                                 ArrayRef(Ops).drop_front(), "", isInBounds());
1768     Value *Splat = State.Builder.CreateVectorSplat(State.VF, NewGEP);
1769     State.set(this, Splat);
1770     State.addMetadata(Splat, GEP);
1771   } else {
1772     // If the GEP has at least one loop-varying operand, we are sure to
1773     // produce a vector of pointers unless VF is scalar.
1774     // The pointer operand of the new GEP. If it's loop-invariant, we
1775     // won't broadcast it.
1776     auto *Ptr = isPointerLoopInvariant() ? State.get(getOperand(0), VPLane(0))
1777                                          : State.get(getOperand(0));
1778 
1779     // Collect all the indices for the new GEP. If any index is
1780     // loop-invariant, we won't broadcast it.
1781     SmallVector<Value *, 4> Indices;
1782     for (unsigned I = 1, E = getNumOperands(); I < E; I++) {
1783       VPValue *Operand = getOperand(I);
1784       if (isIndexLoopInvariant(I - 1))
1785         Indices.push_back(State.get(Operand, VPLane(0)));
1786       else
1787         Indices.push_back(State.get(Operand));
1788     }
1789 
1790     // Create the new GEP. Note that this GEP may be a scalar if VF == 1,
1791     // but it should be a vector, otherwise.
1792     auto *NewGEP = State.Builder.CreateGEP(GEP->getSourceElementType(), Ptr,
1793                                            Indices, "", isInBounds());
1794     assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) &&
1795            "NewGEP is not a pointer vector");
1796     State.set(this, NewGEP);
1797     State.addMetadata(NewGEP, GEP);
1798   }
1799 }
1800 
1801 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1802 void VPWidenGEPRecipe::print(raw_ostream &O, const Twine &Indent,
1803                              VPSlotTracker &SlotTracker) const {
1804   O << Indent << "WIDEN-GEP ";
1805   O << (isPointerLoopInvariant() ? "Inv" : "Var");
1806   for (size_t I = 0; I < getNumOperands() - 1; ++I)
1807     O << "[" << (isIndexLoopInvariant(I) ? "Inv" : "Var") << "]";
1808 
1809   O << " ";
1810   printAsOperand(O, SlotTracker);
1811   O << " = getelementptr";
1812   printFlags(O);
1813   printOperands(O, SlotTracker);
1814 }
1815 #endif
1816 
1817 void VPVectorPointerRecipe::execute(VPTransformState &State) {
1818   auto &Builder = State.Builder;
1819   State.setDebugLocFrom(getDebugLoc());
1820   unsigned CurrentPart = getUnrollPart(*this);
1821   // Use i32 for the gep index type when the value is constant,
1822   // or query DataLayout for a more suitable index type otherwise.
1823   const DataLayout &DL = Builder.GetInsertBlock()->getDataLayout();
1824   Type *IndexTy = State.VF.isScalable() && (IsReverse || CurrentPart > 0)
1825                       ? DL.getIndexType(Builder.getPtrTy(0))
1826                       : Builder.getInt32Ty();
1827   Value *Ptr = State.get(getOperand(0), VPLane(0));
1828   bool InBounds = isInBounds();
1829 
1830   Value *ResultPtr = nullptr;
1831   if (IsReverse) {
1832     // If the address is consecutive but reversed, then the
1833     // wide store needs to start at the last vector element.
1834     // RunTimeVF = VScale * VF.getKnownMinValue()
1835     // For fixed-width vectors VScale is 1, so RunTimeVF = VF.getKnownMinValue().
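    // E.g. (a hedged sketch) with a fixed VF of 4 and CurrentPart = 1:
    // RunTimeVF = 4, NumElt = -4 and LastLane = -3, so ResultPtr ends up
    // pointing at element Ptr[-7], the lowest-addressed lane of this part.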
1836     Value *RunTimeVF = getRuntimeVF(Builder, IndexTy, State.VF);
1837     // NumElt = -CurrentPart * RunTimeVF
1838     Value *NumElt = Builder.CreateMul(
1839         ConstantInt::get(IndexTy, -(int64_t)CurrentPart), RunTimeVF);
1840     // LastLane = 1 - RunTimeVF
1841     Value *LastLane =
1842         Builder.CreateSub(ConstantInt::get(IndexTy, 1), RunTimeVF);
1843     ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, NumElt, "", InBounds);
1844     ResultPtr = Builder.CreateGEP(IndexedTy, ResultPtr, LastLane, "", InBounds);
1845   } else {
1846     Value *Increment = createStepForVF(Builder, IndexTy, State.VF, CurrentPart);
1847     ResultPtr = Builder.CreateGEP(IndexedTy, Ptr, Increment, "", InBounds);
1848   }
1849 
1850   State.set(this, ResultPtr, /*IsScalar*/ true);
1851 }
1852 
1853 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1854 void VPVectorPointerRecipe::print(raw_ostream &O, const Twine &Indent,
1855                                   VPSlotTracker &SlotTracker) const {
1856   O << Indent;
1857   printAsOperand(O, SlotTracker);
1858   O << " = vector-pointer ";
1859   if (IsReverse)
1860     O << "(reverse) ";
1861 
1862   printOperands(O, SlotTracker);
1863 }
1864 #endif
1865 
1866 void VPBlendRecipe::execute(VPTransformState &State) {
1867   assert(isNormalized() && "Expected blend to be normalized!");
1868   State.setDebugLocFrom(getDebugLoc());
1869   // We know that all PHIs in non-header blocks are converted into
1870   // selects, so we don't have to worry about the insertion order and we
1871   // can just use the builder.
1872   // At this point we generate the predication tree. There may be
1873   // duplications since this is a simple recursive scan, but future
1874   // optimizations will clean it up.
1875 
1876   unsigned NumIncoming = getNumIncomingValues();
1877 
1878   // Generate a sequence of selects of the form:
1879   // SELECT(Mask3, In3,
1880   //        SELECT(Mask2, In2,
1881   //               SELECT(Mask1, In1,
1882   //                      In0)))
1883   // Note that Mask0 is never used: lanes for which no path reaches this phi
1884   // (and which are essentially undef) are taken from In0.
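  // A hedged sketch of the IR produced by the loop below for three incoming
  // values (illustrative names):
  //   %predphi  = select <VF x i1> %mask1, %in1, %in0
  //   %predphi1 = select <VF x i1> %mask2, %in2, %predphi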
1885   bool OnlyFirstLaneUsed = vputils::onlyFirstLaneUsed(this);
1886   Value *Result = nullptr;
1887   for (unsigned In = 0; In < NumIncoming; ++In) {
1888     // We might have single-edge PHIs (single-predecessor blocks) - use the
1889     // first incoming value directly, without a 'select'.
1890     Value *In0 = State.get(getIncomingValue(In), OnlyFirstLaneUsed);
1891     if (In == 0)
1892       Result = In0; // Initialize with the first incoming value.
1893     else {
1894       // Select between the current value and the previous incoming edge
1895       // based on the incoming mask.
1896       Value *Cond = State.get(getMask(In), OnlyFirstLaneUsed);
1897       Result = State.Builder.CreateSelect(Cond, In0, Result, "predphi");
1898     }
1899   }
1900   State.set(this, Result, OnlyFirstLaneUsed);
1901 }
1902 
1903 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1904 void VPBlendRecipe::print(raw_ostream &O, const Twine &Indent,
1905                           VPSlotTracker &SlotTracker) const {
1906   O << Indent << "BLEND ";
1907   printAsOperand(O, SlotTracker);
1908   O << " =";
1909   if (getNumIncomingValues() == 1) {
1910     // Not a User of any mask: not really blending, this is a
1911     // single-predecessor phi.
1912     O << " ";
1913     getIncomingValue(0)->printAsOperand(O, SlotTracker);
1914   } else {
1915     for (unsigned I = 0, E = getNumIncomingValues(); I < E; ++I) {
1916       O << " ";
1917       getIncomingValue(I)->printAsOperand(O, SlotTracker);
1918       if (I == 0)
1919         continue;
1920       O << "/";
1921       getMask(I)->printAsOperand(O, SlotTracker);
1922     }
1923   }
1924 }
1925 #endif
1926 
1927 void VPReductionRecipe::execute(VPTransformState &State) {
1928   assert(!State.Lane && "Reduction being replicated.");
1929   Value *PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
1930   RecurKind Kind = RdxDesc.getRecurrenceKind();
1931   // Propagate the fast-math flags carried by the underlying instruction.
1932   IRBuilderBase::FastMathFlagGuard FMFGuard(State.Builder);
1933   State.Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1934   Value *NewVecOp = State.get(getVecOp());
1935   if (VPValue *Cond = getCondOp()) {
1936     Value *NewCond = State.get(Cond, State.VF.isScalar());
1937     VectorType *VecTy = dyn_cast<VectorType>(NewVecOp->getType());
1938     Type *ElementTy = VecTy ? VecTy->getElementType() : NewVecOp->getType();
1939 
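    // Lanes whose condition is false are replaced with a neutral value so they
    // do not change the reduced result, e.g. (a hedged sketch for an integer
    // add reduction): %sel = select <VF x i1> %cond, %vec, splat(0).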
1940     Value *Start;
1941     if (RecurrenceDescriptor::isAnyOfRecurrenceKind(Kind))
1942       Start = RdxDesc.getRecurrenceStartValue();
1943     else
1944       Start = llvm::getRecurrenceIdentity(Kind, ElementTy,
1945                                           RdxDesc.getFastMathFlags());
1946     if (State.VF.isVector())
1947       Start = State.Builder.CreateVectorSplat(VecTy->getElementCount(), Start);
1948 
1949     Value *Select = State.Builder.CreateSelect(NewCond, NewVecOp, Start);
1950     NewVecOp = Select;
1951   }
1952   Value *NewRed;
1953   Value *NextInChain;
1954   if (IsOrdered) {
1955     if (State.VF.isVector())
1956       NewRed =
1957           createOrderedReduction(State.Builder, RdxDesc, NewVecOp, PrevInChain);
1958     else
1959       NewRed = State.Builder.CreateBinOp(
1960           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), PrevInChain,
1961           NewVecOp);
1962     PrevInChain = NewRed;
1963     NextInChain = NewRed;
1964   } else {
1965     PrevInChain = State.get(getChainOp(), /*IsScalar*/ true);
1966     NewRed = createReduction(State.Builder, RdxDesc, NewVecOp);
1967     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
1968       NextInChain = createMinMaxOp(State.Builder, RdxDesc.getRecurrenceKind(),
1969                                    NewRed, PrevInChain);
1970     else
1971       NextInChain = State.Builder.CreateBinOp(
1972           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, PrevInChain);
1973   }
1974   State.set(this, NextInChain, /*IsScalar*/ true);
1975 }
1976 
1977 void VPReductionEVLRecipe::execute(VPTransformState &State) {
1978   assert(!State.Lane && "Reduction being replicated.");
1979 
1980   auto &Builder = State.Builder;
1981   // Propagate the fast-math flags carried by the underlying instruction.
1982   IRBuilderBase::FastMathFlagGuard FMFGuard(Builder);
1983   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
1984   Builder.setFastMathFlags(RdxDesc.getFastMathFlags());
1985 
1986   RecurKind Kind = RdxDesc.getRecurrenceKind();
1987   Value *Prev = State.get(getChainOp(), /*IsScalar*/ true);
1988   Value *VecOp = State.get(getVecOp());
1989   Value *EVL = State.get(getEVL(), VPLane(0));
1990 
1991   VectorBuilder VBuilder(Builder);
1992   VBuilder.setEVL(EVL);
1993   Value *Mask;
1994   // TODO: move the all-true mask generation into VectorBuilder.
1995   if (VPValue *CondOp = getCondOp())
1996     Mask = State.get(CondOp);
1997   else
1998     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
1999   VBuilder.setMask(Mask);
2000 
2001   Value *NewRed;
2002   if (isOrdered()) {
2003     NewRed = createOrderedReduction(VBuilder, RdxDesc, VecOp, Prev);
2004   } else {
2005     NewRed = createSimpleReduction(VBuilder, VecOp, RdxDesc);
2006     if (RecurrenceDescriptor::isMinMaxRecurrenceKind(Kind))
2007       NewRed = createMinMaxOp(Builder, Kind, NewRed, Prev);
2008     else
2009       NewRed = Builder.CreateBinOp(
2010           (Instruction::BinaryOps)RdxDesc.getOpcode(Kind), NewRed, Prev);
2011   }
2012   State.set(this, NewRed, /*IsScalar*/ true);
2013 }
2014 
2015 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2016 void VPReductionRecipe::print(raw_ostream &O, const Twine &Indent,
2017                               VPSlotTracker &SlotTracker) const {
2018   O << Indent << "REDUCE ";
2019   printAsOperand(O, SlotTracker);
2020   O << " = ";
2021   getChainOp()->printAsOperand(O, SlotTracker);
2022   O << " +";
2023   if (isa<FPMathOperator>(getUnderlyingInstr()))
2024     O << getUnderlyingInstr()->getFastMathFlags();
2025   O << " reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
2026   getVecOp()->printAsOperand(O, SlotTracker);
2027   if (isConditional()) {
2028     O << ", ";
2029     getCondOp()->printAsOperand(O, SlotTracker);
2030   }
2031   O << ")";
2032   if (RdxDesc.IntermediateStore)
2033     O << " (with final reduction value stored in invariant address sunk "
2034          "outside of loop)";
2035 }
2036 
2037 void VPReductionEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2038                                  VPSlotTracker &SlotTracker) const {
2039   const RecurrenceDescriptor &RdxDesc = getRecurrenceDescriptor();
2040   O << Indent << "REDUCE ";
2041   printAsOperand(O, SlotTracker);
2042   O << " = ";
2043   getChainOp()->printAsOperand(O, SlotTracker);
2044   O << " +";
2045   if (isa<FPMathOperator>(getUnderlyingInstr()))
2046     O << getUnderlyingInstr()->getFastMathFlags();
2047   O << " vp.reduce." << Instruction::getOpcodeName(RdxDesc.getOpcode()) << " (";
2048   getVecOp()->printAsOperand(O, SlotTracker);
2049   O << ", ";
2050   getEVL()->printAsOperand(O, SlotTracker);
2051   if (isConditional()) {
2052     O << ", ";
2053     getCondOp()->printAsOperand(O, SlotTracker);
2054   }
2055   O << ")";
2056   if (RdxDesc.IntermediateStore)
2057     O << " (with final reduction value stored in invariant address sunk "
2058          "outside of loop)";
2059 }
2060 #endif
2061 
2062 bool VPReplicateRecipe::shouldPack() const {
2063   // Find if the recipe is used by a widened recipe via an intervening
2064   // VPPredInstPHIRecipe. In this case, also pack the scalar values in a vector.
2065   return any_of(users(), [](const VPUser *U) {
2066     if (auto *PredR = dyn_cast<VPPredInstPHIRecipe>(U))
2067       return any_of(PredR->users(), [PredR](const VPUser *U) {
2068         return !U->usesScalars(PredR);
2069       });
2070     return false;
2071   });
2072 }
2073 
2074 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2075 void VPReplicateRecipe::print(raw_ostream &O, const Twine &Indent,
2076                               VPSlotTracker &SlotTracker) const {
2077   O << Indent << (IsUniform ? "CLONE " : "REPLICATE ");
2078 
2079   if (!getUnderlyingInstr()->getType()->isVoidTy()) {
2080     printAsOperand(O, SlotTracker);
2081     O << " = ";
2082   }
2083   if (auto *CB = dyn_cast<CallBase>(getUnderlyingInstr())) {
2084     O << "call";
2085     printFlags(O);
2086     O << "@" << CB->getCalledFunction()->getName() << "(";
2087     interleaveComma(make_range(op_begin(), op_begin() + (getNumOperands() - 1)),
2088                     O, [&O, &SlotTracker](VPValue *Op) {
2089                       Op->printAsOperand(O, SlotTracker);
2090                     });
2091     O << ")";
2092   } else {
2093     O << Instruction::getOpcodeName(getUnderlyingInstr()->getOpcode());
2094     printFlags(O);
2095     printOperands(O, SlotTracker);
2096   }
2097 
2098   if (shouldPack())
2099     O << " (S->V)";
2100 }
2101 #endif
2102 
2103 Value *VPScalarCastRecipe::generate(VPTransformState &State) {
2104   assert(vputils::onlyFirstLaneUsed(this) &&
2105          "Codegen only implemented for first lane.");
2106   switch (Opcode) {
2107   case Instruction::SExt:
2108   case Instruction::ZExt:
2109   case Instruction::Trunc: {
2110     // Note: SExt/ZExt not used yet.
2111     Value *Op = State.get(getOperand(0), VPLane(0));
2112     return State.Builder.CreateCast(Instruction::CastOps(Opcode), Op, ResultTy);
2113   }
2114   default:
2115     llvm_unreachable("opcode not implemented yet");
2116   }
2117 }
2118 
2119 void VPScalarCastRecipe::execute(VPTransformState &State) {
2120   State.set(this, generate(State), VPLane(0));
2121 }
2122 
2123 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2124 void VPScalarCastRecipe::print(raw_ostream &O, const Twine &Indent,
2125                                 VPSlotTracker &SlotTracker) const {
2126   O << Indent << "SCALAR-CAST ";
2127   printAsOperand(O, SlotTracker);
2128   O << " = " << Instruction::getOpcodeName(Opcode) << " ";
2129   printOperands(O, SlotTracker);
2130   O << " to " << *ResultTy;
2131 }
2132 #endif
2133 
2134 void VPBranchOnMaskRecipe::execute(VPTransformState &State) {
2135   assert(State.Lane && "Branch on Mask works only on single instance.");
2136 
2137   unsigned Lane = State.Lane->getKnownLane();
2138 
2139   Value *ConditionBit = nullptr;
2140   VPValue *BlockInMask = getMask();
2141   if (BlockInMask) {
2142     ConditionBit = State.get(BlockInMask);
2143     if (ConditionBit->getType()->isVectorTy())
2144       ConditionBit = State.Builder.CreateExtractElement(
2145           ConditionBit, State.Builder.getInt32(Lane));
2146   } else // Block in mask is all-one.
2147     ConditionBit = State.Builder.getTrue();
2148 
2149   // Replace the temporary unreachable terminator with a new conditional branch,
2150   // whose two destinations will be set later when they are created.
2151   auto *CurrentTerminator = State.CFG.PrevBB->getTerminator();
2152   assert(isa<UnreachableInst>(CurrentTerminator) &&
2153          "Expected to replace unreachable terminator with conditional branch.");
2154   auto *CondBr = BranchInst::Create(State.CFG.PrevBB, nullptr, ConditionBit);
2155   CondBr->setSuccessor(0, nullptr);
2156   ReplaceInstWithInst(CurrentTerminator, CondBr);
2157 }
2158 
2159 void VPPredInstPHIRecipe::execute(VPTransformState &State) {
2160   assert(State.Lane && "Predicated instruction PHI works per instance.");
2161   Instruction *ScalarPredInst =
2162       cast<Instruction>(State.get(getOperand(0), *State.Lane));
2163   BasicBlock *PredicatedBB = ScalarPredInst->getParent();
2164   BasicBlock *PredicatingBB = PredicatedBB->getSinglePredecessor();
2165   assert(PredicatingBB && "Predicated block has no single predecessor.");
2166   assert(isa<VPReplicateRecipe>(getOperand(0)) &&
2167          "operand must be VPReplicateRecipe");
2168 
2169   // By current pack/unpack logic we need to generate only a single phi node: if
2170   // a vector value for the predicated instruction exists at this point it means
2171   // the instruction has vector users only, and a phi for the vector value is
2172   // needed. In this case the recipe of the predicated instruction is marked to
2173   // also do that packing, thereby "hoisting" the insert-element sequence.
2174   // Otherwise, a phi node for the scalar value is needed.
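  // A hedged sketch of the vector-value case (illustrative block/value names):
  //   pred.continue:
  //     %vec.phi = phi <VF x ty> [ %vec.before, %predicating.bb ],
  //                              [ %iei,        %predicated.bb ]
  // where %iei is the insertelement produced when packing the predicated value.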
2175   if (State.hasVectorValue(getOperand(0))) {
2176     Value *VectorValue = State.get(getOperand(0));
2177     InsertElementInst *IEI = cast<InsertElementInst>(VectorValue);
2178     PHINode *VPhi = State.Builder.CreatePHI(IEI->getType(), 2);
2179     VPhi->addIncoming(IEI->getOperand(0), PredicatingBB); // Unmodified vector.
2180     VPhi->addIncoming(IEI, PredicatedBB); // New vector with inserted element.
2181     if (State.hasVectorValue(this))
2182       State.reset(this, VPhi);
2183     else
2184       State.set(this, VPhi);
2185     // NOTE: Currently we need to update the value of the operand, so the next
2186     // predicated iteration inserts its generated value in the correct vector.
2187     State.reset(getOperand(0), VPhi);
2188   } else {
2189     if (vputils::onlyFirstLaneUsed(this) && !State.Lane->isFirstLane())
2190       return;
2191 
2192     Type *PredInstType = getOperand(0)->getUnderlyingValue()->getType();
2193     PHINode *Phi = State.Builder.CreatePHI(PredInstType, 2);
2194     Phi->addIncoming(PoisonValue::get(ScalarPredInst->getType()),
2195                      PredicatingBB);
2196     Phi->addIncoming(ScalarPredInst, PredicatedBB);
2197     if (State.hasScalarValue(this, *State.Lane))
2198       State.reset(this, Phi, *State.Lane);
2199     else
2200       State.set(this, Phi, *State.Lane);
2201     // NOTE: Currently we need to update the value of the operand, so the next
2202     // predicated iteration inserts its generated value in the correct vector.
2203     State.reset(getOperand(0), Phi, *State.Lane);
2204   }
2205 }
2206 
2207 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2208 void VPPredInstPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2209                                 VPSlotTracker &SlotTracker) const {
2210   O << Indent << "PHI-PREDICATED-INSTRUCTION ";
2211   printAsOperand(O, SlotTracker);
2212   O << " = ";
2213   printOperands(O, SlotTracker);
2214 }
2215 #endif
2216 
2217 InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
2218                                                  VPCostContext &Ctx) const {
2219   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2220   const Align Alignment =
2221       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2222   unsigned AS =
2223       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2224   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2225 
2226   if (!Consecutive) {
2227     // TODO: Using the original IR may not be accurate.
2228     // Currently, ARM will use the underlying IR to calculate gather/scatter
2229     // instruction cost.
2230     const Value *Ptr = getLoadStorePointerOperand(&Ingredient);
2231     assert(!Reverse &&
2232            "Non-consecutive memory accesses should not be reversed.");
2233     return Ctx.TTI.getAddressComputationCost(Ty) +
2234            Ctx.TTI.getGatherScatterOpCost(Ingredient.getOpcode(), Ty, Ptr,
2235                                           IsMasked, Alignment, CostKind,
2236                                           &Ingredient);
2237   }
2238 
2239   InstructionCost Cost = 0;
2240   if (IsMasked) {
2241     Cost += Ctx.TTI.getMaskedMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment,
2242                                           AS, CostKind);
2243   } else {
2244     TTI::OperandValueInfo OpInfo =
2245         Ctx.TTI.getOperandInfo(Ingredient.getOperand(0));
2246     Cost += Ctx.TTI.getMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment, AS,
2247                                     CostKind, OpInfo, &Ingredient);
2248   }
2249   if (!Reverse)
2250     return Cost;
2251 
2252   return Cost += Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2253                                         cast<VectorType>(Ty), {}, CostKind, 0);
2254 }
2255 
2256 void VPWidenLoadRecipe::execute(VPTransformState &State) {
2257   auto *LI = cast<LoadInst>(&Ingredient);
2258 
2259   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2260   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2261   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2262   bool CreateGather = !isConsecutive();
2263 
2264   auto &Builder = State.Builder;
2265   State.setDebugLocFrom(getDebugLoc());
2266   Value *Mask = nullptr;
2267   if (auto *VPMask = getMask()) {
2268     // Mask reversal is only needed for non-all-one (non-null) masks, as the
2269     // reverse of an all-one (null) mask is still an all-one (null) mask.
2270     Mask = State.get(VPMask);
2271     if (isReverse())
2272       Mask = Builder.CreateVectorReverse(Mask, "reverse");
2273   }
2274 
2275   Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateGather);
2276   Value *NewLI;
2277   if (CreateGather) {
2278     NewLI = Builder.CreateMaskedGather(DataTy, Addr, Alignment, Mask, nullptr,
2279                                        "wide.masked.gather");
2280   } else if (Mask) {
2281     NewLI =
2282         Builder.CreateMaskedLoad(DataTy, Addr, Alignment, Mask,
2283                                  PoisonValue::get(DataTy), "wide.masked.load");
2284   } else {
2285     NewLI = Builder.CreateAlignedLoad(DataTy, Addr, Alignment, "wide.load");
2286   }
2287   // Add metadata to the load, but setVectorValue to the reverse shuffle.
2288   State.addMetadata(NewLI, LI);
2289   if (Reverse)
2290     NewLI = Builder.CreateVectorReverse(NewLI, "reverse");
2291   State.set(this, NewLI);
2292 }
2293 
2294 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2295 void VPWidenLoadRecipe::print(raw_ostream &O, const Twine &Indent,
2296                               VPSlotTracker &SlotTracker) const {
2297   O << Indent << "WIDEN ";
2298   printAsOperand(O, SlotTracker);
2299   O << " = load ";
2300   printOperands(O, SlotTracker);
2301 }
2302 #endif
2303 
2304 /// Use all-true mask for reverse rather than actual mask, as it avoids a
2305 /// dependence w/o affecting the result.
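/// E.g. (a hedged sketch for a scalable i32 vector):
///   %rev = call <vscale x 4 x i32>
///              @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> %v,
///                  <vscale x 4 x i1> splat(true), i32 %evl)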
2306 static Instruction *createReverseEVL(IRBuilderBase &Builder, Value *Operand,
2307                                      Value *EVL, const Twine &Name) {
2308   VectorType *ValTy = cast<VectorType>(Operand->getType());
2309   Value *AllTrueMask =
2310       Builder.CreateVectorSplat(ValTy->getElementCount(), Builder.getTrue());
2311   return Builder.CreateIntrinsic(ValTy, Intrinsic::experimental_vp_reverse,
2312                                  {Operand, AllTrueMask, EVL}, nullptr, Name);
2313 }
2314 
2315 void VPWidenLoadEVLRecipe::execute(VPTransformState &State) {
2316   auto *LI = cast<LoadInst>(&Ingredient);
2317 
2318   Type *ScalarDataTy = getLoadStoreType(&Ingredient);
2319   auto *DataTy = VectorType::get(ScalarDataTy, State.VF);
2320   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2321   bool CreateGather = !isConsecutive();
2322 
2323   auto &Builder = State.Builder;
2324   State.setDebugLocFrom(getDebugLoc());
2325   CallInst *NewLI;
2326   Value *EVL = State.get(getEVL(), VPLane(0));
2327   Value *Addr = State.get(getAddr(), !CreateGather);
2328   Value *Mask = nullptr;
2329   if (VPValue *VPMask = getMask()) {
2330     Mask = State.get(VPMask);
2331     if (isReverse())
2332       Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
2333   } else {
2334     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2335   }
2336 
2337   if (CreateGather) {
2338     NewLI =
2339         Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL},
2340                                 nullptr, "wide.masked.gather");
2341   } else {
2342     VectorBuilder VBuilder(Builder);
2343     VBuilder.setEVL(EVL).setMask(Mask);
2344     NewLI = cast<CallInst>(VBuilder.createVectorInstruction(
2345         Instruction::Load, DataTy, Addr, "vp.op.load"));
2346   }
2347   NewLI->addParamAttr(
2348       0, Attribute::getWithAlignment(NewLI->getContext(), Alignment));
2349   State.addMetadata(NewLI, LI);
2350   Instruction *Res = NewLI;
2351   if (isReverse())
2352     Res = createReverseEVL(Builder, Res, EVL, "vp.reverse");
2353   State.set(this, Res);
2354 }
2355 
2356 InstructionCost VPWidenLoadEVLRecipe::computeCost(ElementCount VF,
2357                                                   VPCostContext &Ctx) const {
2358   if (!Consecutive || IsMasked)
2359     return VPWidenMemoryRecipe::computeCost(VF, Ctx);
2360 
2361   // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
2362   // here because the EVL recipes use EVL to replace the tail mask, whereas the
2363   // legacy model always accounts for the cost of the mask.
2364   // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() once we no
2365   // longer need to compare to the legacy cost model.
2366   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2367   const Align Alignment =
2368       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2369   unsigned AS =
2370       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2371   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2372   InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
2373       Ingredient.getOpcode(), Ty, Alignment, AS, CostKind);
2374   if (!Reverse)
2375     return Cost;
2376 
2377   return Cost + Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2378                                        cast<VectorType>(Ty), {}, CostKind, 0);
2379 }
2380 
2381 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2382 void VPWidenLoadEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2383                                  VPSlotTracker &SlotTracker) const {
2384   O << Indent << "WIDEN ";
2385   printAsOperand(O, SlotTracker);
2386   O << " = vp.load ";
2387   printOperands(O, SlotTracker);
2388 }
2389 #endif
2390 
2391 void VPWidenStoreRecipe::execute(VPTransformState &State) {
2392   auto *SI = cast<StoreInst>(&Ingredient);
2393 
2394   VPValue *StoredVPValue = getStoredValue();
2395   bool CreateScatter = !isConsecutive();
2396   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2397 
2398   auto &Builder = State.Builder;
2399   State.setDebugLocFrom(getDebugLoc());
2400 
2401   Value *Mask = nullptr;
2402   if (auto *VPMask = getMask()) {
2403     // Mask reversal is only needed for non-all-one (non-null) masks, as the
2404     // reverse of an all-one (null) mask is still an all-one (null) mask.
2405     Mask = State.get(VPMask);
2406     if (isReverse())
2407       Mask = Builder.CreateVectorReverse(Mask, "reverse");
2408   }
2409 
2410   Value *StoredVal = State.get(StoredVPValue);
2411   if (isReverse()) {
2412     // If we store to reverse consecutive memory locations, then we need
2413     // to reverse the order of elements in the stored value.
2414     StoredVal = Builder.CreateVectorReverse(StoredVal, "reverse");
2415     // We don't want to update the value in the map as it might be used in
2416     // another expression. So don't call resetVectorValue(StoredVal).
2417   }
2418   Value *Addr = State.get(getAddr(), /*IsScalar*/ !CreateScatter);
2419   Instruction *NewSI = nullptr;
2420   if (CreateScatter)
2421     NewSI = Builder.CreateMaskedScatter(StoredVal, Addr, Alignment, Mask);
2422   else if (Mask)
2423     NewSI = Builder.CreateMaskedStore(StoredVal, Addr, Alignment, Mask);
2424   else
2425     NewSI = Builder.CreateAlignedStore(StoredVal, Addr, Alignment);
2426   State.addMetadata(NewSI, SI);
2427 }
2428 
2429 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2430 void VPWidenStoreRecipe::print(raw_ostream &O, const Twine &Indent,
2431                                VPSlotTracker &SlotTracker) const {
2432   O << Indent << "WIDEN store ";
2433   printOperands(O, SlotTracker);
2434 }
2435 #endif
2436 
2437 void VPWidenStoreEVLRecipe::execute(VPTransformState &State) {
2438   auto *SI = cast<StoreInst>(&Ingredient);
2439 
2440   VPValue *StoredValue = getStoredValue();
2441   bool CreateScatter = !isConsecutive();
2442   const Align Alignment = getLoadStoreAlignment(&Ingredient);
2443 
2444   auto &Builder = State.Builder;
2445   State.setDebugLocFrom(getDebugLoc());
2446 
2447   CallInst *NewSI = nullptr;
2448   Value *StoredVal = State.get(StoredValue);
2449   Value *EVL = State.get(getEVL(), VPLane(0));
2450   if (isReverse())
2451     StoredVal = createReverseEVL(Builder, StoredVal, EVL, "vp.reverse");
2452   Value *Mask = nullptr;
2453   if (VPValue *VPMask = getMask()) {
2454     Mask = State.get(VPMask);
2455     if (isReverse())
2456       Mask = createReverseEVL(Builder, Mask, EVL, "vp.reverse.mask");
2457   } else {
2458     Mask = Builder.CreateVectorSplat(State.VF, Builder.getTrue());
2459   }
2460   Value *Addr = State.get(getAddr(), !CreateScatter);
2461   if (CreateScatter) {
2462     NewSI = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()),
2463                                     Intrinsic::vp_scatter,
2464                                     {StoredVal, Addr, Mask, EVL});
2465   } else {
2466     VectorBuilder VBuilder(Builder);
2467     VBuilder.setEVL(EVL).setMask(Mask);
2468     NewSI = cast<CallInst>(VBuilder.createVectorInstruction(
2469         Instruction::Store, Type::getVoidTy(EVL->getContext()),
2470         {StoredVal, Addr}));
2471   }
2472   NewSI->addParamAttr(
2473       1, Attribute::getWithAlignment(NewSI->getContext(), Alignment));
2474   State.addMetadata(NewSI, SI);
2475 }
2476 
2477 InstructionCost VPWidenStoreEVLRecipe::computeCost(ElementCount VF,
2478                                                    VPCostContext &Ctx) const {
2479   if (!Consecutive || IsMasked)
2480     return VPWidenMemoryRecipe::computeCost(VF, Ctx);
2481 
2482   // We need to use getMaskedMemoryOpCost() instead of getMemoryOpCost()
2483   // here because the EVL recipes use EVL to replace the tail mask, whereas the
2484   // legacy model always accounts for the cost of the mask.
2485   // TODO: Use getMemoryOpCost() instead of getMaskedMemoryOpCost() once we no
2486   // longer need to compare to the legacy cost model.
2487   Type *Ty = ToVectorTy(getLoadStoreType(&Ingredient), VF);
2488   const Align Alignment =
2489       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
2490   unsigned AS =
2491       getLoadStoreAddressSpace(const_cast<Instruction *>(&Ingredient));
2492   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2493   InstructionCost Cost = Ctx.TTI.getMaskedMemoryOpCost(
2494       Ingredient.getOpcode(), Ty, Alignment, AS, CostKind);
2495   if (!Reverse)
2496     return Cost;
2497 
2498   return Cost + Ctx.TTI.getShuffleCost(TargetTransformInfo::SK_Reverse,
2499                                        cast<VectorType>(Ty), {}, CostKind, 0);
2500 }
2501 
2502 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2503 void VPWidenStoreEVLRecipe::print(raw_ostream &O, const Twine &Indent,
2504                                   VPSlotTracker &SlotTracker) const {
2505   O << Indent << "WIDEN vp.store ";
2506   printOperands(O, SlotTracker);
2507 }
2508 #endif
2509 
2510 static Value *createBitOrPointerCast(IRBuilderBase &Builder, Value *V,
2511                                      VectorType *DstVTy, const DataLayout &DL) {
2512   // Verify that V is a vector type with same number of elements as DstVTy.
2513   auto VF = DstVTy->getElementCount();
2514   auto *SrcVecTy = cast<VectorType>(V->getType());
2515   assert(VF == SrcVecTy->getElementCount() && "Vector dimensions do not match");
2516   Type *SrcElemTy = SrcVecTy->getElementType();
2517   Type *DstElemTy = DstVTy->getElementType();
2518   assert((DL.getTypeSizeInBits(SrcElemTy) == DL.getTypeSizeInBits(DstElemTy)) &&
2519          "Vector elements must have same size");
2520 
2521   // Do a direct cast if element types are castable.
2522   if (CastInst::isBitOrNoopPointerCastable(SrcElemTy, DstElemTy, DL)) {
2523     return Builder.CreateBitOrPointerCast(V, DstVTy);
2524   }
2525   // V cannot be directly cast to the desired vector type.
2526   // May happen when V is a floating point vector but DstVTy is a vector of
2527   // pointers or vice-versa. Handle this using a two-step bitcast using an
2528   // intermediate Integer type for the bitcast i.e. Ptr <-> Int <-> Float.
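  // E.g. (a hedged sketch, assuming 64-bit pointers): casting <4 x ptr> to
  // <4 x double> goes via <4 x i64>:
  //   %int = ptrtoint <4 x ptr> %v to <4 x i64>
  //   %res = bitcast <4 x i64> %int to <4 x double>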
2529   assert((DstElemTy->isPointerTy() != SrcElemTy->isPointerTy()) &&
2530          "Only one type should be a pointer type");
2531   assert((DstElemTy->isFloatingPointTy() != SrcElemTy->isFloatingPointTy()) &&
2532          "Only one type should be a floating point type");
2533   Type *IntTy =
2534       IntegerType::getIntNTy(V->getContext(), DL.getTypeSizeInBits(SrcElemTy));
2535   auto *VecIntTy = VectorType::get(IntTy, VF);
2536   Value *CastVal = Builder.CreateBitOrPointerCast(V, VecIntTy);
2537   return Builder.CreateBitOrPointerCast(CastVal, DstVTy);
2538 }
2539 
2540 /// Return a vector containing interleaved elements from multiple
2541 /// smaller input vectors.
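/// E.g. (a hedged sketch) interleaving two <4 x i32> vectors A and B yields
/// <A0, B0, A1, B1, A2, B2, A3, B3>: for fixed-width vectors this is the
/// shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>; for scalable vectors the
/// llvm.vector.interleave2 intrinsic is used instead.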
2542 static Value *interleaveVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vals,
2543                                 const Twine &Name) {
2544   unsigned Factor = Vals.size();
2545   assert(Factor > 1 && "Tried to interleave invalid number of vectors");
2546 
2547   VectorType *VecTy = cast<VectorType>(Vals[0]->getType());
2548 #ifndef NDEBUG
2549   for (Value *Val : Vals)
2550     assert(Val->getType() == VecTy && "Tried to interleave mismatched types");
2551 #endif
2552 
2553   // Scalable vectors cannot use arbitrary shufflevectors (only splats), so
2554   // must use intrinsics to interleave.
2555   if (VecTy->isScalableTy()) {
2556     VectorType *WideVecTy = VectorType::getDoubleElementsVectorType(VecTy);
2557     return Builder.CreateIntrinsic(WideVecTy, Intrinsic::vector_interleave2,
2558                                    Vals,
2559                                    /*FMFSource=*/nullptr, Name);
2560   }
2561 
2562   // Fixed length. Start by concatenating all vectors into a wide vector.
2563   Value *WideVec = concatenateVectors(Builder, Vals);
2564 
2565   // Interleave the elements into the wide vector.
2566   const unsigned NumElts = VecTy->getElementCount().getFixedValue();
2567   return Builder.CreateShuffleVector(
2568       WideVec, createInterleaveMask(NumElts, Factor), Name);
2569 }
2570 
2571 // Try to vectorize the interleave group that \p Instr belongs to.
2572 //
2573 // E.g. Translate following interleaved load group (factor = 3):
2574 //   for (i = 0; i < N; i+=3) {
2575 //     R = Pic[i];             // Member of index 0
2576 //     G = Pic[i+1];           // Member of index 1
2577 //     B = Pic[i+2];           // Member of index 2
2578 //     ... // do something to R, G, B
2579 //   }
2580 // To:
2581 //   %wide.vec = load <12 x i32>                       ; Read 4 tuples of R,G,B
2582 //   %R.vec = shuffle %wide.vec, poison, <0, 3, 6, 9>   ; R elements
2583 //   %G.vec = shuffle %wide.vec, poison, <1, 4, 7, 10>  ; G elements
2584 //   %B.vec = shuffle %wide.vec, poison, <2, 5, 8, 11>  ; B elements
2585 //
2586 // Or translate following interleaved store group (factor = 3):
2587 //   for (i = 0; i < N; i+=3) {
2588 //     ... do something to R, G, B
2589 //     Pic[i]   = R;           // Member of index 0
2590 //     Pic[i+1] = G;           // Member of index 1
2591 //     Pic[i+2] = B;           // Member of index 2
2592 //   }
2593 // To:
2594 //   %R_G.vec = shuffle %R.vec, %G.vec, <0, 1, 2, ..., 7>
2595 //   %B_U.vec = shuffle %B.vec, poison, <0, 1, 2, 3, u, u, u, u>
2596 //   %interleaved.vec = shuffle %R_G.vec, %B_U.vec,
2597 //        <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>    ; Interleave R,G,B elements
2598 //   store <12 x i32> %interleaved.vec              ; Write 4 tuples of R,G,B
2599 void VPInterleaveRecipe::execute(VPTransformState &State) {
2600   assert(!State.Lane && "Interleave group being replicated.");
2601   const InterleaveGroup<Instruction> *Group = IG;
2602   Instruction *Instr = Group->getInsertPos();
2603 
2604   // Prepare for the vector type of the interleaved load/store.
2605   Type *ScalarTy = getLoadStoreType(Instr);
2606   unsigned InterleaveFactor = Group->getFactor();
2607   auto *VecTy = VectorType::get(ScalarTy, State.VF * InterleaveFactor);
2608 
2609   // TODO: extend the masked interleaved-group support to reversed access.
2610   VPValue *BlockInMask = getMask();
2611   assert((!BlockInMask || !Group->isReverse()) &&
2612          "Reversed masked interleave-group not supported.");
2613 
2614   VPValue *Addr = getAddr();
2615   Value *ResAddr = State.get(Addr, VPLane(0));
2616   if (auto *I = dyn_cast<Instruction>(ResAddr))
2617     State.setDebugLocFrom(I->getDebugLoc());
2618 
2619   // If the group is reverse, adjust the index to refer to the last vector lane
2620   // instead of the first. We adjust the index from the first vector lane,
2621   // rather than directly getting the pointer for lane VF - 1, because the
2622   // pointer operand of the interleaved access is supposed to be uniform.
2623   if (Group->isReverse()) {
2624     Value *RuntimeVF =
2625         getRuntimeVF(State.Builder, State.Builder.getInt32Ty(), State.VF);
2626     Value *Index =
2627         State.Builder.CreateSub(RuntimeVF, State.Builder.getInt32(1));
2628     Index = State.Builder.CreateMul(Index,
2629                                     State.Builder.getInt32(Group->getFactor()));
2630     Index = State.Builder.CreateNeg(Index);
2631 
2632     bool InBounds = false;
2633     if (auto *Gep = dyn_cast<GetElementPtrInst>(ResAddr->stripPointerCasts()))
2634       InBounds = Gep->isInBounds();
2635     ResAddr = State.Builder.CreateGEP(ScalarTy, ResAddr, Index, "", InBounds);
2636   }
2637 
2638   State.setDebugLocFrom(Instr->getDebugLoc());
2639   Value *PoisonVec = PoisonValue::get(VecTy);
2640 
2641   auto CreateGroupMask = [&BlockInMask, &State,
2642                           &InterleaveFactor](Value *MaskForGaps) -> Value * {
2643     if (State.VF.isScalable()) {
2644       assert(!MaskForGaps && "Interleaved groups with gaps are not supported.");
2645       assert(InterleaveFactor == 2 &&
2646              "Unsupported deinterleave factor for scalable vectors");
2647       auto *ResBlockInMask = State.get(BlockInMask);
2648       SmallVector<Value *, 2> Ops = {ResBlockInMask, ResBlockInMask};
2649       auto *MaskTy = VectorType::get(State.Builder.getInt1Ty(),
2650                                      State.VF.getKnownMinValue() * 2, true);
2651       return State.Builder.CreateIntrinsic(
2652           MaskTy, Intrinsic::vector_interleave2, Ops,
2653           /*FMFSource=*/nullptr, "interleaved.mask");
2654     }
2655 
2656     if (!BlockInMask)
2657       return MaskForGaps;
2658 
2659     Value *ResBlockInMask = State.get(BlockInMask);
2660     Value *ShuffledMask = State.Builder.CreateShuffleVector(
2661         ResBlockInMask,
2662         createReplicatedMask(InterleaveFactor, State.VF.getKnownMinValue()),
2663         "interleaved.mask");
2664     return MaskForGaps ? State.Builder.CreateBinOp(Instruction::And,
2665                                                    ShuffledMask, MaskForGaps)
2666                        : ShuffledMask;
2667   };
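  // For illustration only (fixed VF = 4, factor = 2, assumed value names): the
  // non-scalable path widens the block mask by repeating each lane once per
  // member, roughly
  //   %interleaved.mask = shufflevector <4 x i1> %blockmask, <4 x i1> poison,
  //                           <8 x i32> <i32 0, i32 0, i32 1, i32 1,
  //                                      i32 2, i32 2, i32 3, i32 3>
  // while the scalable (factor-2 only) path gets the same effect from
  // llvm.vector.interleave2.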
2668 
2669   const DataLayout &DL = Instr->getDataLayout();
2670   // Vectorize the interleaved load group.
2671   if (isa<LoadInst>(Instr)) {
2672     Value *MaskForGaps = nullptr;
2673     if (NeedsMaskForGaps) {
2674       MaskForGaps = createBitMaskForGaps(State.Builder,
2675                                          State.VF.getKnownMinValue(), *Group);
2676       assert(MaskForGaps && "Mask for Gaps is required but it is null");
2677     }
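    // Illustrative shape of such a gap mask (assuming factor 3, a gap at
    // member index 2, and VF = 2): every third lane is disabled, i.e.
    //   <i1 true, i1 true, i1 false, i1 true, i1 true, i1 false>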
2678 
2679     Instruction *NewLoad;
2680     if (BlockInMask || MaskForGaps) {
2681       Value *GroupMask = CreateGroupMask(MaskForGaps);
2682       NewLoad = State.Builder.CreateMaskedLoad(VecTy, ResAddr,
2683                                                Group->getAlign(), GroupMask,
2684                                                PoisonVec, "wide.masked.vec");
2685     } else
2686       NewLoad = State.Builder.CreateAlignedLoad(VecTy, ResAddr,
2687                                                 Group->getAlign(), "wide.vec");
2688     Group->addMetadata(NewLoad);
2689 
2690     ArrayRef<VPValue *> VPDefs = definedValues();
2692     if (VecTy->isScalableTy()) {
2693       assert(InterleaveFactor == 2 &&
2694              "Unsupported deinterleave factor for scalable vectors");
2695 
2696       // Scalable vectors cannot use arbitrary shufflevectors (only splats),
2697       // so we must use intrinsics to deinterleave.
2698       Value *DI = State.Builder.CreateIntrinsic(
2699           Intrinsic::vector_deinterleave2, VecTy, NewLoad,
2700           /*FMFSource=*/nullptr, "strided.vec");
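      // A sketch of the emitted call, assuming <vscale x 4 x i32> members and
      // an illustrative %wide.vec name:
      //   %strided.vec = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
      //       @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> %wide.vec)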
2701       unsigned J = 0;
2702       for (unsigned I = 0; I < InterleaveFactor; ++I) {
2703         Instruction *Member = Group->getMember(I);
2704 
2705         if (!Member)
2706           continue;
2707 
2708         Value *StridedVec = State.Builder.CreateExtractValue(DI, I);
2709         // If this member has a different type, cast the result to the member's type.
2710         if (Member->getType() != ScalarTy) {
2711           VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2712           StridedVec =
2713               createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2714         }
2715 
2716         if (Group->isReverse())
2717           StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2718 
2719         State.set(VPDefs[J], StridedVec);
2720         ++J;
2721       }
2722 
2723       return;
2724     }
2725 
2726     // For each member in the group, shuffle out the appropriate data from the
2727     // wide load.
2728     unsigned J = 0;
2729     for (unsigned I = 0; I < InterleaveFactor; ++I) {
2730       Instruction *Member = Group->getMember(I);
2731 
2732       // Skip the gaps in the group.
2733       if (!Member)
2734         continue;
2735 
2736       auto StrideMask =
2737           createStrideMask(I, InterleaveFactor, State.VF.getKnownMinValue());
2738       Value *StridedVec =
2739           State.Builder.CreateShuffleVector(NewLoad, StrideMask, "strided.vec");
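      // Illustrative result (assumed names) for VF = 4, factor = 3, member
      // index 0 over i32 data:
      //   %strided.vec = shufflevector <12 x i32> %wide.vec, <12 x i32> poison,
      //                                <4 x i32> <i32 0, i32 3, i32 6, i32 9>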
2740 
2741       // If this member has a different type, cast the result to the member's type.
2742       if (Member->getType() != ScalarTy) {
2743         assert(!State.VF.isScalable() && "VF is assumed to be non scalable.");
2744         VectorType *OtherVTy = VectorType::get(Member->getType(), State.VF);
2745         StridedVec =
2746             createBitOrPointerCast(State.Builder, StridedVec, OtherVTy, DL);
2747       }
2748 
2749       if (Group->isReverse())
2750         StridedVec = State.Builder.CreateVectorReverse(StridedVec, "reverse");
2751 
2752       State.set(VPDefs[J], StridedVec);
2753       ++J;
2754     }
2755     return;
2756   }
2757 
2758   // The subvector type for the current instruction.
2759   auto *SubVT = VectorType::get(ScalarTy, State.VF);
2760 
2761   // Vectorize the interleaved store group.
2762   Value *MaskForGaps =
2763       createBitMaskForGaps(State.Builder, State.VF.getKnownMinValue(), *Group);
2764   assert((!MaskForGaps || !State.VF.isScalable()) &&
2765          "masking gaps for scalable vectors is not yet supported.");
2766   ArrayRef<VPValue *> StoredValues = getStoredValues();
2767   // Collect the stored vector from each member.
2768   SmallVector<Value *, 4> StoredVecs;
2769   unsigned StoredIdx = 0;
2770   for (unsigned i = 0; i < InterleaveFactor; i++) {
2771     assert((Group->getMember(i) || MaskForGaps) &&
2772            "Failed to get a member from an interleaved store group");
2773     Instruction *Member = Group->getMember(i);
2774 
2775     // Skip the gaps in the group.
2776     if (!Member) {
2777       Value *Poison = PoisonValue::get(SubVT);
2778       StoredVecs.push_back(Poison);
2779       continue;
2780     }
2781 
2782     Value *StoredVec = State.get(StoredValues[StoredIdx]);
2783     ++StoredIdx;
2784 
2785     if (Group->isReverse())
2786       StoredVec = State.Builder.CreateVectorReverse(StoredVec, "reverse");
2787 
2788     // If this member has a different type, cast it to a unified type.
2789 
2790     if (StoredVec->getType() != SubVT)
2791       StoredVec = createBitOrPointerCast(State.Builder, StoredVec, SubVT, DL);
2792 
2793     StoredVecs.push_back(StoredVec);
2794   }
2795 
2796   // Interleave all the smaller vectors into one wider vector.
2797   Value *IVec = interleaveVectors(State.Builder, StoredVecs, "interleaved.vec");
2798   Instruction *NewStoreInstr;
2799   if (BlockInMask || MaskForGaps) {
2800     Value *GroupMask = CreateGroupMask(MaskForGaps);
2801     NewStoreInstr = State.Builder.CreateMaskedStore(
2802         IVec, ResAddr, Group->getAlign(), GroupMask);
2803   } else
2804     NewStoreInstr =
2805         State.Builder.CreateAlignedStore(IVec, ResAddr, Group->getAlign());
2806 
2807   Group->addMetadata(NewStoreInstr);
2808 }
2809 
2810 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2811 void VPInterleaveRecipe::print(raw_ostream &O, const Twine &Indent,
2812                                VPSlotTracker &SlotTracker) const {
2813   O << Indent << "INTERLEAVE-GROUP with factor " << IG->getFactor() << " at ";
2814   IG->getInsertPos()->printAsOperand(O, false);
2815   O << ", ";
2816   getAddr()->printAsOperand(O, SlotTracker);
2817   VPValue *Mask = getMask();
2818   if (Mask) {
2819     O << ", ";
2820     Mask->printAsOperand(O, SlotTracker);
2821   }
2822 
2823   unsigned OpIdx = 0;
2824   for (unsigned i = 0; i < IG->getFactor(); ++i) {
2825     if (!IG->getMember(i))
2826       continue;
2827     if (getNumStoreOperands() > 0) {
2828       O << "\n" << Indent << "  store ";
2829       getOperand(1 + OpIdx)->printAsOperand(O, SlotTracker);
2830       O << " to index " << i;
2831     } else {
2832       O << "\n" << Indent << "  ";
2833       getVPValue(OpIdx)->printAsOperand(O, SlotTracker);
2834       O << " = load from index " << i;
2835     }
2836     ++OpIdx;
2837   }
2838 }
2839 #endif
2840 
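// Creates the scalar canonical induction phi in the vector loop header. Only
// the preheader incoming value is added here; the backedge value is filled in
// once the rest of the plan has been executed. A rough sketch of the result
// (names, the zero start and the i64 type are illustrative):
//   %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]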
2841 void VPCanonicalIVPHIRecipe::execute(VPTransformState &State) {
2842   Value *Start = getStartValue()->getLiveInIRValue();
2843   PHINode *Phi = PHINode::Create(Start->getType(), 2, "index");
2844   Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
2845 
2846   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2847   Phi->addIncoming(Start, VectorPH);
2848   Phi->setDebugLoc(getDebugLoc());
2849   State.set(this, Phi, /*IsScalar*/ true);
2850 }
2851 
2852 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2853 void VPCanonicalIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
2854                                    VPSlotTracker &SlotTracker) const {
2855   O << Indent << "EMIT ";
2856   printAsOperand(O, SlotTracker);
2857   O << " = CANONICAL-INDUCTION ";
2858   printOperands(O, SlotTracker);
2859 }
2860 #endif
2861 
2862 bool VPCanonicalIVPHIRecipe::isCanonical(
2863     InductionDescriptor::InductionKind Kind, VPValue *Start,
2864     VPValue *Step) const {
2865   // Must be an integer induction.
2866   if (Kind != InductionDescriptor::IK_IntInduction)
2867     return false;
2868   // Start must match the start value of this canonical induction.
2869   if (Start != getStartValue())
2870     return false;
2871 
2872   // If the step is defined by a recipe, it is not a ConstantInt.
2873   if (Step->getDefiningRecipe())
2874     return false;
2875 
2876   ConstantInt *StepC = dyn_cast<ConstantInt>(Step->getLiveInIRValue());
2877   return StepC && StepC->isOne();
2878 }
2879 
2880 bool VPWidenPointerInductionRecipe::onlyScalarsGenerated(bool IsScalable) {
2881   return IsScalarAfterVectorization &&
2882          (!IsScalable || vputils::onlyFirstLaneUsed(this));
2883 }
2884 
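// Expands a widened pointer induction. Part 0 creates the pointer phi shared
// by all unrolled parts, together with the i8-based GEP that advances it by
// step * VF * UF per vector iteration; every part then emits a "vector.gep"
// whose lane offsets are <step*0, ..., step*(VF-1)>, shifted by part * VF *
// step. A loose sketch for fixed VF = 4, with illustrative names and types:
//   %pointer.phi = phi ptr [ %start, %vector.ph ], [ %ptr.ind, %vector.body ]
//   %vector.gep  = getelementptr i8, ptr %pointer.phi, <4 x i64> %offsets
//   %ptr.ind     = getelementptr i8, ptr %pointer.phi, i64 %step.mul.vf.uf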
2885 void VPWidenPointerInductionRecipe::execute(VPTransformState &State) {
2886   assert(IndDesc.getKind() == InductionDescriptor::IK_PtrInduction &&
2887          "Not a pointer induction according to InductionDescriptor!");
2888   assert(cast<PHINode>(getUnderlyingInstr())->getType()->isPointerTy() &&
2889          "Unexpected type.");
2890   assert(!onlyScalarsGenerated(State.VF.isScalable()) &&
2891          "Recipe should have been replaced");
2892 
2893   auto *IVR = getParent()->getPlan()->getCanonicalIV();
2894   PHINode *CanonicalIV = cast<PHINode>(State.get(IVR, /*IsScalar*/ true));
2895   unsigned CurrentPart = getUnrollPart(*this);
2896 
2897   // Build a pointer phi.
2898   Value *ScalarStartValue = getStartValue()->getLiveInIRValue();
2899   Type *ScStValueType = ScalarStartValue->getType();
2900 
2901   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
2902   PHINode *NewPointerPhi = nullptr;
2903   if (CurrentPart == 0) {
2904     NewPointerPhi = PHINode::Create(ScStValueType, 2, "pointer.phi",
2905                                     CanonicalIV->getIterator());
2906     NewPointerPhi->addIncoming(ScalarStartValue, VectorPH);
2907   } else {
2908     // The recipe has been unrolled. In that case, fetch the single pointer phi
2909     // shared among all unrolled parts of the recipe.
2910     auto *GEP =
2911         cast<GetElementPtrInst>(State.get(getFirstUnrolledPartOperand()));
2912     NewPointerPhi = cast<PHINode>(GEP->getPointerOperand());
2913   }
2914 
2915   // A pointer induction is performed by using a GEP.
2916   BasicBlock::iterator InductionLoc = State.Builder.GetInsertPoint();
2917   Value *ScalarStepValue = State.get(getOperand(1), VPLane(0));
2918   Type *PhiType = IndDesc.getStep()->getType();
2919   Value *RuntimeVF = getRuntimeVF(State.Builder, PhiType, State.VF);
2920   // Add induction update using an incorrect block temporarily. The phi node
2921   // will be fixed after VPlan execution. Note that at this point the latch
2922   // block cannot be used, as it does not exist yet.
2923   // TODO: Model increment value in VPlan, by turning the recipe into a
2924   // multi-def and a subclass of VPHeaderPHIRecipe.
2925   if (CurrentPart == 0) {
2926     // The recipe represents the first part of the pointer induction. Create the
2927     // GEP to increment the phi across all unrolled parts.
2928     unsigned UF = getParent()->getPlan()->getUF();
2929     Value *NumUnrolledElems =
2930         State.Builder.CreateMul(RuntimeVF, ConstantInt::get(PhiType, UF));
2931 
2932     Value *InductionGEP = GetElementPtrInst::Create(
2933         State.Builder.getInt8Ty(), NewPointerPhi,
2934         State.Builder.CreateMul(ScalarStepValue, NumUnrolledElems), "ptr.ind",
2935         InductionLoc);
2936 
2937     NewPointerPhi->addIncoming(InductionGEP, VectorPH);
2938   }
2939 
2940   // Create the actual address GEPs that use the pointer phi as base and a
2941   // vectorized version of the step value (<step*0, ..., step*(VF-1)>) as offset.
2942   Type *VecPhiType = VectorType::get(PhiType, State.VF);
2943   Value *StartOffsetScalar = State.Builder.CreateMul(
2944       RuntimeVF, ConstantInt::get(PhiType, CurrentPart));
2945   Value *StartOffset =
2946       State.Builder.CreateVectorSplat(State.VF, StartOffsetScalar);
2947   // Create a vector of consecutive numbers from zero to VF - 1.
2948   StartOffset = State.Builder.CreateAdd(
2949       StartOffset, State.Builder.CreateStepVector(VecPhiType));
2950 
2951   assert(ScalarStepValue == State.get(getOperand(1), VPLane(0)) &&
2952          "scalar step must be the same across all parts");
2953   Value *GEP = State.Builder.CreateGEP(
2954       State.Builder.getInt8Ty(), NewPointerPhi,
2955       State.Builder.CreateMul(StartOffset, State.Builder.CreateVectorSplat(
2956                                                State.VF, ScalarStepValue)),
2957       "vector.gep");
2958   State.set(this, GEP);
2959 }
2960 
2961 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2962 void VPWidenPointerInductionRecipe::print(raw_ostream &O, const Twine &Indent,
2963                                           VPSlotTracker &SlotTracker) const {
2964   assert((getNumOperands() == 2 || getNumOperands() == 4) &&
2965          "unexpected number of operands");
2966   O << Indent << "EMIT ";
2967   printAsOperand(O, SlotTracker);
2968   O << " = WIDEN-POINTER-INDUCTION ";
2969   getStartValue()->printAsOperand(O, SlotTracker);
2970   O << ", " << *IndDesc.getStep();
2971   if (getNumOperands() == 4) {
2972     O << ", ";
2973     getOperand(2)->printAsOperand(O, SlotTracker);
2974     O << ", ";
2975     getOperand(3)->printAsOperand(O, SlotTracker);
2976   }
2977 }
2978 #endif
2979 
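// Expands a SCEV expression to IR at the current insert point and records the
// result in State.ExpandedSCEVs. As a hypothetical example, expanding the SCEV
// (4 * (%n /u 4)) could produce roughly
//   %0 = udiv i64 %n, 4
//   %1 = mul i64 %0, 4
// (the expander may fold constants or reuse existing values instead).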
2980 void VPExpandSCEVRecipe::execute(VPTransformState &State) {
2981   assert(!State.Lane && "cannot be used in per-lane");
2982   const DataLayout &DL = State.CFG.PrevBB->getDataLayout();
2983   SCEVExpander Exp(SE, DL, "induction");
2984 
2985   Value *Res = Exp.expandCodeFor(Expr, Expr->getType(),
2986                                  &*State.Builder.GetInsertPoint());
2987   assert(!State.ExpandedSCEVs.contains(Expr) &&
2988          "Same SCEV expanded multiple times");
2989   State.ExpandedSCEVs[Expr] = Res;
2990   State.set(this, Res, VPLane(0));
2991 }
2992 
2993 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2994 void VPExpandSCEVRecipe::print(raw_ostream &O, const Twine &Indent,
2995                                VPSlotTracker &SlotTracker) const {
2996   O << Indent << "EMIT ";
2997   getVPSingleValue()->printAsOperand(O, SlotTracker);
2998   O << " = EXPAND SCEV " << *Expr;
2999 }
3000 #endif
3001 
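// Widens the canonical IV: the scalar IV is broadcast and a per-part step
// vector is added on top. Roughly, for fixed VF = 4, an i64 IV and part 0
// (names illustrative; the splat is an insertelement/shufflevector pair and
// constants may be folded by the builder):
//   %broadcast = ...splat of %index...
//   %vec.iv    = add <4 x i64> %broadcast, <i64 0, i64 1, i64 2, i64 3>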
3002 void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) {
3003   Value *CanonicalIV = State.get(getOperand(0), /*IsScalar*/ true);
3004   Type *STy = CanonicalIV->getType();
3005   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
3006   ElementCount VF = State.VF;
3007   Value *VStart = VF.isScalar()
3008                       ? CanonicalIV
3009                       : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
3010   Value *VStep = createStepForVF(Builder, STy, VF, getUnrollPart(*this));
3011   if (VF.isVector()) {
3012     VStep = Builder.CreateVectorSplat(VF, VStep);
3013     VStep =
3014         Builder.CreateAdd(VStep, Builder.CreateStepVector(VStep->getType()));
3015   }
3016   Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
3017   State.set(this, CanonicalVectorIV);
3018 }
3019 
3020 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3021 void VPWidenCanonicalIVRecipe::print(raw_ostream &O, const Twine &Indent,
3022                                      VPSlotTracker &SlotTracker) const {
3023   O << Indent << "EMIT ";
3024   printAsOperand(O, SlotTracker);
3025   O << " = WIDEN-CANONICAL-INDUCTION ";
3026   printOperands(O, SlotTracker);
3027 }
3028 #endif
3029 
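// Creates the vector phi carrying a first-order recurrence. The initial scalar
// value is placed in the last lane of an otherwise-poison vector so that the
// splice with the previous iteration lines up. Rough shape for fixed VF = 4
// over i32 (illustrative names):
//   vector.ph:
//     %vector.recur.init = insertelement <4 x i32> poison, i32 %init, i32 3
//   vector.body:
//     %vector.recur = phi <4 x i32> [ %vector.recur.init, %vector.ph ], ...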
3030 void VPFirstOrderRecurrencePHIRecipe::execute(VPTransformState &State) {
3031   auto &Builder = State.Builder;
3032   // Create a vector from the initial value.
3033   auto *VectorInit = getStartValue()->getLiveInIRValue();
3034 
3035   Type *VecTy = State.VF.isScalar()
3036                     ? VectorInit->getType()
3037                     : VectorType::get(VectorInit->getType(), State.VF);
3038 
3039   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3040   if (State.VF.isVector()) {
3041     auto *IdxTy = Builder.getInt32Ty();
3042     auto *One = ConstantInt::get(IdxTy, 1);
3043     IRBuilder<>::InsertPointGuard Guard(Builder);
3044     Builder.SetInsertPoint(VectorPH->getTerminator());
3045     auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, State.VF);
3046     auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
3047     VectorInit = Builder.CreateInsertElement(
3048         PoisonValue::get(VecTy), VectorInit, LastIdx, "vector.recur.init");
3049   }
3050 
3051   // Create a phi node for the new recurrence.
3052   PHINode *Phi = PHINode::Create(VecTy, 2, "vector.recur");
3053   Phi->insertBefore(State.CFG.PrevBB->getFirstInsertionPt());
3054   Phi->addIncoming(VectorInit, VectorPH);
3055   State.set(this, Phi);
3056 }
3057 
3058 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3059 void VPFirstOrderRecurrencePHIRecipe::print(raw_ostream &O, const Twine &Indent,
3060                                             VPSlotTracker &SlotTracker) const {
3061   O << Indent << "FIRST-ORDER-RECURRENCE-PHI ";
3062   printAsOperand(O, SlotTracker);
3063   O << " = phi ";
3064   printOperands(O, SlotTracker);
3065 }
3066 #endif
3067 
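// Creates the header phi for a reduction. For part 0 the start value is
// inserted into lane 0 of a splat of the reduction's identity (min/max and
// any-of reductions instead start from the start value itself); the remaining
// parts start from the plain identity splat. Rough shape for an i32 add
// reduction with fixed VF = 4 (illustrative names):
//   vector.ph:
//     %start.vec = insertelement <4 x i32> zeroinitializer, i32 %start, i32 0
//   vector.body:
//     %vec.phi = phi <4 x i32> [ %start.vec, %vector.ph ], ...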
3068 void VPReductionPHIRecipe::execute(VPTransformState &State) {
3069   auto &Builder = State.Builder;
3070 
3071   // Reductions do not have to start at zero. They can start with
3072   // any loop-invariant value.
3073   VPValue *StartVPV = getStartValue();
3074   Value *StartV = StartVPV->getLiveInIRValue();
3075 
3076   // In order to support recurrences we need to be able to vectorize Phi nodes.
3077   // Phi nodes have cycles, so we need to vectorize them in two stages. This is
3078   // stage #1: We create a new vector PHI node with no incoming edges. We'll use
3079   // this value when we vectorize all of the instructions that use the PHI.
3080   bool ScalarPHI = State.VF.isScalar() || IsInLoop;
3081   Type *VecTy = ScalarPHI ? StartV->getType()
3082                           : VectorType::get(StartV->getType(), State.VF);
3083 
3084   BasicBlock *HeaderBB = State.CFG.PrevBB;
3085   assert(State.CurrentVectorLoop->getHeader() == HeaderBB &&
3086          "recipe must be in the vector loop header");
3087   auto *Phi = PHINode::Create(VecTy, 2, "vec.phi");
3088   Phi->insertBefore(HeaderBB->getFirstInsertionPt());
3089   State.set(this, Phi, IsInLoop);
3090 
3091   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3092 
3093   Value *Iden = nullptr;
3094   RecurKind RK = RdxDesc.getRecurrenceKind();
3095   unsigned CurrentPart = getUnrollPart(*this);
3096 
3097   if (RecurrenceDescriptor::isMinMaxRecurrenceKind(RK) ||
3098       RecurrenceDescriptor::isAnyOfRecurrenceKind(RK)) {
3099     // MinMax and AnyOf reductions have the start value as their identity.
3100     if (ScalarPHI) {
3101       Iden = StartV;
3102     } else {
3103       IRBuilderBase::InsertPointGuard IPBuilder(Builder);
3104       Builder.SetInsertPoint(VectorPH->getTerminator());
3105       StartV = Iden = State.get(StartVPV);
3106     }
3107   } else {
3108     Iden = llvm::getRecurrenceIdentity(RK, VecTy->getScalarType(),
3109                                        RdxDesc.getFastMathFlags());
3110 
3111     if (!ScalarPHI) {
3112       if (CurrentPart == 0) {
3113         // Create start and identity vector values for the reduction in the
3114         // preheader.
3115         // TODO: Introduce recipes in VPlan preheader to create initial values.
3116         Iden = Builder.CreateVectorSplat(State.VF, Iden);
3117         IRBuilderBase::InsertPointGuard IPBuilder(Builder);
3118         Builder.SetInsertPoint(VectorPH->getTerminator());
3119         Constant *Zero = Builder.getInt32(0);
3120         StartV = Builder.CreateInsertElement(Iden, StartV, Zero);
3121       } else {
3122         Iden = Builder.CreateVectorSplat(State.VF, Iden);
3123       }
3124     }
3125   }
3126 
3127   Phi = cast<PHINode>(State.get(this, IsInLoop));
3128   Value *StartVal = (CurrentPart == 0) ? StartV : Iden;
3129   Phi->addIncoming(StartVal, VectorPH);
3130 }
3131 
3132 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3133 void VPReductionPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3134                                  VPSlotTracker &SlotTracker) const {
3135   O << Indent << "WIDEN-REDUCTION-PHI ";
3136 
3137   printAsOperand(O, SlotTracker);
3138   O << " = phi ";
3139   printOperands(O, SlotTracker);
3140 }
3141 #endif
3142 
3143 void VPWidenPHIRecipe::execute(VPTransformState &State) {
3144   assert(EnableVPlanNativePath &&
3145          "Non-native vplans are not expected to have VPWidenPHIRecipes.");
3146 
3147   Value *Op0 = State.get(getOperand(0));
3148   Type *VecTy = Op0->getType();
3149   Value *VecPhi = State.Builder.CreatePHI(VecTy, 2, "vec.phi");
3150   State.set(this, VecPhi);
3151 }
3152 
3153 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3154 void VPWidenPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3155                              VPSlotTracker &SlotTracker) const {
3156   O << Indent << "WIDEN-PHI ";
3157 
3158   auto *OriginalPhi = cast<PHINode>(getUnderlyingValue());
3159   // Unless all incoming values are modeled in VPlan, print the original PHI
3160   // directly.
3161   // TODO: Remove once all VPWidenPHIRecipe instances keep all relevant incoming
3162   // values as VPValues.
3163   if (getNumOperands() != OriginalPhi->getNumOperands()) {
3164     O << VPlanIngredient(OriginalPhi);
3165     return;
3166   }
3167 
3168   printAsOperand(O, SlotTracker);
3169   O << " = phi ";
3170   printOperands(O, SlotTracker);
3171 }
3172 #endif
3173 
3174 // TODO: It would be good to use the existing VPWidenPHIRecipe instead and
3175 // remove VPActiveLaneMaskPHIRecipe.
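// Creates the phi carrying the active lane mask, e.g. for fixed VF = 4 (with
// illustrative names):
//   %active.lane.mask = phi <4 x i1> [ %entry.mask, %vector.ph ], ...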
3176 void VPActiveLaneMaskPHIRecipe::execute(VPTransformState &State) {
3177   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3178   Value *StartMask = State.get(getOperand(0));
3179   PHINode *Phi =
3180       State.Builder.CreatePHI(StartMask->getType(), 2, "active.lane.mask");
3181   Phi->addIncoming(StartMask, VectorPH);
3182   Phi->setDebugLoc(getDebugLoc());
3183   State.set(this, Phi);
3184 }
3185 
3186 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3187 void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3188                                       VPSlotTracker &SlotTracker) const {
3189   O << Indent << "ACTIVE-LANE-MASK-PHI ";
3190 
3191   printAsOperand(O, SlotTracker);
3192   O << " = phi ";
3193   printOperands(O, SlotTracker);
3194 }
3195 #endif
3196 
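// Creates the scalar phi for the EVL-based IV. Only the preheader incoming
// value is added here; the backedge value is hooked up after the plan has been
// executed. Sketch with illustrative names and an assumed i64 type:
//   %evl.based.iv = phi i64 [ %start, %vector.ph ], [ %iv.next.evl, %vector.body ]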
3197 void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) {
3198   BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this);
3199   Value *Start = State.get(getOperand(0), VPLane(0));
3200   PHINode *Phi = State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv");
3201   Phi->addIncoming(Start, VectorPH);
3202   Phi->setDebugLoc(getDebugLoc());
3203   State.set(this, Phi, /*IsScalar=*/true);
3204 }
3205 
3206 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3207 void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent,
3208                                   VPSlotTracker &SlotTracker) const {
3209   O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI ";
3210 
3211   printAsOperand(O, SlotTracker);
3212   O << " = phi ";
3213   printOperands(O, SlotTracker);
3214 }
3215 #endif
3216