//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getVectorLoopRegion());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Skip blocks outside region
    if (!VPBB->getParent())
      break;
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        const auto *II = GetIntOrFpInductionDescriptor(Phi);
        if (!II)
          continue;

        VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
        VPValue *Step =
            vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
        NewRecipe = new VPWidenIntOrFpInductionRecipe(
            Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryRecipe for loads and stores.
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenLoadRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenStoreRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe = new VPWidenIntrinsicRecipe(
              *CI, getVectorIntrinsicIDForCall(CI, &TLI),
              {Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
              CI->getDebugLoc());
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds for
  // sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo
    // or all users outside of SinkTo must be uniform-after-vectorization
    // (i.e., only their first lane is used). In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = cast<VPRecipeBase>(U);
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
static VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
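/// The expected shape is (a sketch; the entry block branches on the region's
/// mask and both paths re-join at the merge block):
///
///      Entry
///      /   \
///   Then    |
///      \   /
///      Merge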
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions in their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
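//
// For example (an illustrative sketch of the CFG shape, not concrete
// recipes):
//
//   [region1: entry1 (BranchOnMask %m) -> then1 -> merge1]
//                          |
//                     empty VPBB
//                          |
//   [region2: entry2 (BranchOnMask %m) -> then2 -> merge2]
//
// The recipes of then1 and the phis of merge1 are moved into then2 and
// merge2 respectively, after which region1 is disconnected.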
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SmallPtrSet<VPRegionBlock *, 4> TransformedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with matching masks, to process up front. This is to
  // avoid iterator invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (TransformedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() == Then2;
      });

      // Remove phi recipes that are unused after merging the regions.
      if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
        Phi1ToMove.eraseFromParent();
        continue;
      }
      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    TransformedRegions.insert(Region1);
  }

  return !TransformedRegions.empty();
}

static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry =
      Plan.createVPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred =
      Plan.createVPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask,
                                        RecipeWithoutMask->getDebugLoc());
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting =
      Plan.createVPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region =
      Plan.createVPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Create a replicate region for RepR and insert it on the edge from
    // CurrentBlock to SplitBlock.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::insertOnEdge(CurrentBlock, SplitBlock, Region);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    // Don't fold the blocks in the skeleton of the Plan into their single
    // predecessors for now.
    // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
    if (!VPBB->getParent())
      continue;
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
        isa<VPIRBasicBlock>(PredVPBB))
      continue;
    WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    // VPBB is now dead and will be cleaned up when the plan gets destroyed.
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
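///
/// For example (a sketch with a single recorded cast):
///   %iv     = WIDEN-INDUCTION %start, %step
///   %casted = cast %iv
///   use %casted
/// becomes
///   %iv = WIDEN-INDUCTION %start, %step
///   use %iv
/// with the now-dead cast cleaned up later.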
static void removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPSingleDefRecipe *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
        if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast;
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
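///
/// For example (a sketch in VPlan-dump-like notation; names illustrative),
/// given a canonical wide induction %iv starting at 0 with step 1:
///   %canonical-iv       = CANONICAL-INDUCTION
///   %widen-canonical-iv = WIDEN-CANONICAL-INDUCTION %canonical-iv
///   %iv                 = WIDEN-INDUCTION ir<0>, ir<1>
/// users of %widen-canonical-iv are redirected to %iv and the
/// WIDEN-CANONICAL-INDUCTION recipe is erased.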
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPScalarIVStepsRecipe *
createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                    Instruction::BinaryOps InductionOpcode,
                    FPMathOperator *FPBinOp, Instruction *TruncI,
                    VPValue *StartV, VPValue *Step, DebugLoc DL,
                    VPBuilder &Builder) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = Builder.createDerivedIV(
      Kind, FPBinOp, StartV, CanonicalIV, Step, "offset.idx");

  // Truncate base induction if needed.
  Type *CanonicalIVType = CanonicalIV->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy, DL);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VPBuilder::InsertPointGuard Guard(Builder);
    Builder.setInsertPoint(VecPreheader);
    Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy, DL);
  }
  return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step);
}

static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
  SetVector<VPUser *> Users(V->user_begin(), V->user_end());
  for (unsigned I = 0; I != Users.size(); ++I) {
    VPRecipeBase *Cur = cast<VPRecipeBase>(Users[I]);
    if (isa<VPHeaderPHIRecipe>(Cur))
      continue;
    for (VPValue *V : Cur->definedValues())
      Users.insert(V->user_begin(), V->user_end());
  }
  return Users.takeVector();
}

/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
/// VPWidenPointerInductionRecipe will generate vectors only. If some users
/// require vectors while others require scalars, the scalar uses need to
/// extract the scalars from the generated vectors (Note that this is different
/// to how int/fp inductions are handled). Legalize extract-from-ends using
/// uniform VPReplicateRecipes of wide inductions to use regular
/// VPReplicateRecipes, so the correct end value is available. Also optimize
/// VPWidenIntOrFpInductionRecipe: if any of its users needs scalar values,
/// provide them with scalar steps built on the canonical scalar IV and update
/// the original IV's users. This is an optional optimization to reduce the
/// need for vector extracts.
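///
/// For example (a sketch; the actual lowering goes through a derived IV and
/// SCALAR-STEPS based on the canonical IV), a pointer induction with only
/// scalar users
///   %ptr.iv = WIDEN-POINTER-INDUCTION %start, %step
/// is replaced by
///   %steps  = SCALAR-STEPS ir<0>, %step
///   %ptr.iv = PTRADD %start, %steps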
static void legalizeAndOptimizeInductions(VPlan &Plan) {
  using namespace llvm::VPlanPatternMatch;
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *PhiR = dyn_cast<VPWidenInductionRecipe>(&Phi);
    if (!PhiR)
      continue;

    // Try to narrow wide and replicating recipes to uniform recipes, based on
    // VPlan analysis.
    // TODO: Apply to all recipes in the future, to replace legacy uniformity
    // analysis.
    auto Users = collectUsersRecursively(PhiR);
    for (VPUser *U : reverse(Users)) {
      auto *Def = dyn_cast<VPSingleDefRecipe>(U);
      auto *RepR = dyn_cast<VPReplicateRecipe>(U);
      // Skip recipes that shouldn't be narrowed.
      if (!Def || !isa<VPReplicateRecipe, VPWidenRecipe>(Def) ||
          Def->getNumUsers() == 0 || !Def->getUnderlyingValue() ||
          (RepR && (RepR->isUniform() || RepR->isPredicated())))
        continue;

      // Skip recipes for which lanes other than the first may be used.
      if (!vputils::isUniformAfterVectorization(Def) &&
          !vputils::onlyFirstLaneUsed(Def))
        continue;

      auto *Clone = new VPReplicateRecipe(Def->getUnderlyingInstr(),
                                          Def->operands(), /*IsUniform*/ true);
      Clone->insertAfter(Def);
      Def->replaceAllUsesWith(Clone);
    }

    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
          Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
          nullptr, StartV, StepV, PtrIV->getDebugLoc(), Builder);

      VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
                                             PtrIV->getDebugLoc(), "next.gep");

      PtrIV->replaceAllUsesWith(PtrAdd);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        WideIV->getDebugLoc(), Builder);

    // Update scalar users of IV to use Steps instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Check if \p VPV is an untruncated wide induction, either before or after
/// the increment. If so return the header IV (before the increment), otherwise
/// return null.
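///
/// For example (a sketch), given
///   %iv      = WIDEN-INDUCTION %start, %step
///   %iv.next = add %iv, %step
/// both %iv and %iv.next are optimizable and the header phi %iv is returned
/// for either of them.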
static VPWidenInductionRecipe *getOptimizableIVOf(VPValue *VPV) {
  auto *WideIV = dyn_cast<VPWidenInductionRecipe>(VPV);
  if (WideIV) {
    // VPV itself is a wide induction, separately compute the end value for exit
    // users if it is not a truncated IV.
    auto *IntOrFpIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(WideIV);
    return (IntOrFpIV && IntOrFpIV->getTruncInst()) ? nullptr : WideIV;
  }

  // Check if VPV is an optimizable induction increment.
  VPRecipeBase *Def = VPV->getDefiningRecipe();
  if (!Def || Def->getNumOperands() != 2)
    return nullptr;
  WideIV = dyn_cast<VPWidenInductionRecipe>(Def->getOperand(0));
  if (!WideIV)
    WideIV = dyn_cast<VPWidenInductionRecipe>(Def->getOperand(1));
  if (!WideIV)
    return nullptr;

  auto IsWideIVInc = [&]() {
    using namespace VPlanPatternMatch;
    auto &ID = WideIV->getInductionDescriptor();

    // Check if VPV increments the induction by the induction step.
    VPValue *IVStep = WideIV->getStepValue();
    switch (ID.getInductionOpcode()) {
    case Instruction::Add:
      return match(VPV, m_c_Binary<Instruction::Add>(m_Specific(WideIV),
                                                     m_Specific(IVStep)));
    case Instruction::FAdd:
      return match(VPV, m_c_Binary<Instruction::FAdd>(m_Specific(WideIV),
                                                      m_Specific(IVStep)));
    case Instruction::FSub:
      return match(VPV, m_Binary<Instruction::FSub>(m_Specific(WideIV),
                                                    m_Specific(IVStep)));
    case Instruction::Sub: {
      // IVStep will be the negated step of the subtraction. Check if Step == -1
      // * IVStep.
      VPValue *Step;
      if (!match(VPV,
                 m_Binary<Instruction::Sub>(m_VPValue(), m_VPValue(Step))) ||
          !Step->isLiveIn() || !IVStep->isLiveIn())
        return false;
      auto *StepCI = dyn_cast<ConstantInt>(Step->getLiveInIRValue());
      auto *IVStepCI = dyn_cast<ConstantInt>(IVStep->getLiveInIRValue());
      return StepCI && IVStepCI &&
             StepCI->getValue() == (-1 * IVStepCI->getValue());
    }
    default:
      return ID.getKind() == InductionDescriptor::IK_PtrInduction &&
             match(VPV, m_GetElementPtr(m_Specific(WideIV),
                                        m_Specific(WideIV->getStepValue())));
    }
    llvm_unreachable("should have been covered by switch above");
  };
  return IsWideIVInc() ? WideIV : nullptr;
}

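/// Replace exit values of wide inductions feeding the exit block's phis:
/// rewrite extract-from-end operands to use the end values pre-computed in
/// \p EndValues and, for exit users of the pre-increment value, step the end
/// value back by one iteration.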
void VPlanTransforms::optimizeInductionExitUsers(
    VPlan &Plan, DenseMap<VPValue *, VPValue *> &EndValues) {
  using namespace VPlanPatternMatch;
  SmallVector<VPIRBasicBlock *> ExitVPBBs(Plan.getExitBlocks());
  if (ExitVPBBs.size() != 1)
    return;

  VPIRBasicBlock *ExitVPBB = ExitVPBBs[0];
  VPBlockBase *PredVPBB = ExitVPBB->getSinglePredecessor();
  if (!PredVPBB)
    return;
  assert(PredVPBB == Plan.getMiddleBlock() &&
         "predecessor must be the middle block");

  VPTypeAnalysis TypeInfo(Plan.getCanonicalIV()->getScalarType());
  VPBuilder B(Plan.getMiddleBlock()->getTerminator());
  for (VPRecipeBase &R : *ExitVPBB) {
    auto *ExitIRI = cast<VPIRInstruction>(&R);
    if (!isa<PHINode>(ExitIRI->getInstruction()))
      break;

    VPValue *Incoming;
    if (!match(ExitIRI->getOperand(0),
               m_VPInstruction<VPInstruction::ExtractFromEnd>(
                   m_VPValue(Incoming), m_SpecificInt(1))))
      continue;

    auto *WideIV = getOptimizableIVOf(Incoming);
    if (!WideIV)
      continue;
    VPValue *EndValue = EndValues.lookup(WideIV);
    assert(EndValue && "end value must have been pre-computed");

    if (Incoming != WideIV) {
      ExitIRI->setOperand(0, EndValue);
      continue;
    }

    VPValue *Escape = nullptr;
    VPValue *Step = WideIV->getStepValue();
    Type *ScalarTy = TypeInfo.inferScalarType(WideIV);
    if (ScalarTy->isIntegerTy()) {
      Escape =
          B.createNaryOp(Instruction::Sub, {EndValue, Step}, {}, "ind.escape");
    } else if (ScalarTy->isPointerTy()) {
      auto *Zero = Plan.getOrAddLiveIn(
          ConstantInt::get(Step->getLiveInIRValue()->getType(), 0));
      Escape = B.createPtrAdd(EndValue,
                              B.createNaryOp(Instruction::Sub, {Zero, Step}),
                              {}, "ind.escape");
    } else if (ScalarTy->isFloatingPointTy()) {
      const auto &ID = WideIV->getInductionDescriptor();
      Escape = B.createNaryOp(
          ID.getInductionBinOp()->getOpcode() == Instruction::FAdd
              ? Instruction::FSub
              : Instruction::FAdd,
          {EndValue, Step}, {ID.getInductionBinOp()->getFastMathFlags()});
    } else {
      llvm_unreachable("all possible induction types must be handled");
    }
    ExitIRI->setOperand(0, Escape);
  }
}

/// Remove redundant VPExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
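///
/// For example (a sketch), two expansions of the same expression
///   %a = EXPAND SCEV (4 * %n)
///   %b = EXPAND SCEV (4 * %n)
/// are de-duplicated by replacing all uses of %b with %a and erasing %b.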
static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
  DenseMap<const SCEV *, VPValue *> SCEV2VPV;

  for (VPRecipeBase &R :
       make_early_inc_range(*Plan.getEntry()->getEntryBasicBlock())) {
    auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
    if (!ExpR)
      continue;

    auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
    if (I.second)
      continue;
    ExpR->replaceAllUsesWith(I.first->second);
    ExpR->eraseFromParent();
  }
}

static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  using namespace llvm::VPlanPatternMatch;

  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    // Try to remove redundant blend recipes.
    SmallPtrSet<VPValue *, 4> UniqueValues;
    if (Blend->isNormalized() || !match(Blend->getMask(0), m_False()))
      UniqueValues.insert(Blend->getIncomingValue(0));
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (!match(Blend->getMask(I), m_False()))
        UniqueValues.insert(Blend->getIncomingValue(I));

    if (UniqueValues.size() == 1) {
      Blend->replaceAllUsesWith(*UniqueValues.begin());
      Blend->eraseFromParent();
      return;
    }

    if (Blend->isNormalized())
      return;

    // Normalize the blend so its first incoming value is used as the initial
    // value with the others blended into it.
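    //
    // For example (a sketch in VPlan-dump-like notation, assuming %m0 has no
    // other users):
    //   BLEND %res = %v0/%m0 %v1/%m1
    // becomes
    //   BLEND %res = %v0 %v1/%m1
    // after which %m0 can be removed as dead code.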

    unsigned StartIndex = 0;
    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      // If a value's mask is used only by the blend then it can be removed as
      // dead code.
      // TODO: Find the most expensive mask that can be dead-coded, or a mask
      // that's used by multiple blends where it can be removed from them all.
      VPValue *Mask = Blend->getMask(I);
      if (Mask->getNumUsers() == 1 && !match(Mask, m_False())) {
        StartIndex = I;
        break;
      }
    }

    SmallVector<VPValue *, 4> OperandsWithMask;
    OperandsWithMask.push_back(Blend->getIncomingValue(StartIndex));

    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      if (I == StartIndex)
        continue;
      OperandsWithMask.push_back(Blend->getIncomingValue(I));
      OperandsWithMask.push_back(Blend->getMask(I));
    }

    auto *NewBlend = new VPBlendRecipe(
        cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask);
    NewBlend->insertBefore(&R);

    VPValue *DeadMask = Blend->getMask(StartIndex);
    Blend->replaceAllUsesWith(NewBlend);
    Blend->eraseFromParent();
    recursivelyDeleteDeadRecipes(DeadMask);
    return;
  }

  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {
        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
          // UnderlyingExt has distinct return type, used to retain legacy cost.
          VPC->setUnderlyingValue(UnderlyingExt);
        }
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = cast<VPRecipeBase>(U);
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  // Simplify (X && Y) || (X && !Y) -> X.
  // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
  // && (Y || Z) and (X || !X) into true. This requires queuing newly created
  // recipes to be visited during simplification.
  VPValue *X, *Y, *X1, *Y1;
  if (match(&R,
            m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
                         m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
      X == X1 && Y == Y1) {
    R.getVPSingleValue()->replaceAllUsesWith(X);
    R.eraseFromParent();
    return;
  }

  if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  if (match(&R, m_Not(m_Not(m_VPValue(A)))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  // Remove redundant DerivedIVs, that is 0 + A * 1 -> A and 0 + 0 * x -> 0.
  if ((match(&R,
             m_DerivedIV(m_SpecificInt(0), m_VPValue(A), m_SpecificInt(1))) ||
       match(&R,
             m_DerivedIV(m_SpecificInt(0), m_SpecificInt(0), m_VPValue()))) &&
      TypeInfo.inferScalarType(R.getOperand(1)) ==
          TypeInfo.inferScalarType(R.getVPSingleValue()))
    return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1));
}

/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for
/// all un-typed live-ins in VPTypeAnalysis.
static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  VPTypeAnalysis TypeInfo(CanonicalIVTy);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount =
      vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
  assert(!isa<SCEVCouldNotCompute>(TripCount) &&
         "Trip count SCEV must be computable");
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  // The vector loop region only executes once. If possible, completely remove
  // the region, otherwise replace the terminator controlling the latch with
  // (BranchOnCond true).
  auto *Header = cast<VPBasicBlock>(VectorRegion->getEntry());
  auto *CanIVTy = Plan.getCanonicalIV()->getScalarType();
  if (all_of(
          Header->phis(),
          IsaPred<VPCanonicalIVPHIRecipe, VPFirstOrderRecurrencePHIRecipe>)) {
    for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) {
      auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR);
      HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue());
      HeaderPhiR->eraseFromParent();
    }

    VPBlockBase *Preheader = VectorRegion->getSinglePredecessor();
    VPBlockBase *Exit = VectorRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(Preheader, VectorRegion);
    VPBlockUtils::disconnectBlocks(VectorRegion, Exit);

    for (VPBlockBase *B : vp_depth_first_shallow(VectorRegion->getEntry()))
      B->setParent(nullptr);

    VPBlockUtils::connectBlocks(Preheader, Header);
    VPBlockUtils::connectBlocks(ExitingVPBB, Exit);
    simplifyRecipes(Plan, CanIVTy);
  } else {
    // The vector region contains header phis for which we cannot remove the
    // loop region yet.
    LLVMContext &Ctx = SE.getContext();
    auto *BOC = new VPInstruction(
        VPInstruction::BranchOnCond,
        {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))}, Term->getDebugLoc());
    ExitingVPBB->appendRecipe(BOC);
  }

  Term->eraseFromParent();
  VPlanTransforms::removeDeadRecipes(Plan);

  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
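///
/// For example (a sketch), given the recurrence
///   %for      = FIRST-ORDER-RECURRENCE-PHI %init, %for.next
///   %use      = add %for, 1
///   %for.next = ...
/// %use is moved after the recipe defining %for.next, so that the
/// FirstOrderRecurrenceSplice of (%for, %for.next) created later can be
/// placed before all remaining users of %for.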
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi;
    // otherwise, FOR would not be a fixed-order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        VPDT.properlyDominates(Previous, SinkCandidate))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (!TryToPushSinkCandidate(cast<VPRecipeBase>(User)))
        return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

/// Try to hoist \p Previous and its operands before all users of \p FOR.
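///
/// This is the dual of sinkRecurrenceUsersAfterPrevious: instead of sinking
/// the users of \p FOR down past Previous, \p Previous (and the recipes it
/// transitively depends on inside the loop region) is moved up to a hoist
/// point dominating all users of \p FOR.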
static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
                                        VPRecipeBase *Previous,
                                        VPDominatorTree &VPDT) {
  if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
    return false;

  // Collect recipes that need hoisting.
  SmallVector<VPRecipeBase *> HoistCandidates;
  SmallPtrSet<VPRecipeBase *, 8> Visited;
  VPRecipeBase *HoistPoint = nullptr;
  // Find the closest hoist point by looking at all users of FOR and selecting
  // the recipe dominating all other users.
  for (VPUser *U : FOR->users()) {
    auto *R = cast<VPRecipeBase>(U);
    if (!HoistPoint || VPDT.properlyDominates(R, HoistPoint))
      HoistPoint = R;
  }
  assert(all_of(FOR->users(),
                [&VPDT, HoistPoint](VPUser *U) {
                  auto *R = cast<VPRecipeBase>(U);
                  return HoistPoint == R ||
                         VPDT.properlyDominates(HoistPoint, R);
                }) &&
         "HoistPoint must dominate all users of FOR");

  auto NeedsHoisting = [HoistPoint, &VPDT,
                        &Visited](VPValue *HoistCandidateV) -> VPRecipeBase * {
    VPRecipeBase *HoistCandidate = HoistCandidateV->getDefiningRecipe();
    if (!HoistCandidate)
      return nullptr;
    VPRegionBlock *EnclosingLoopRegion =
        HoistCandidate->getParent()->getEnclosingLoopRegion();
    assert((!HoistCandidate->getParent()->getParent() ||
            HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
           "CFG in VPlan should still be flat, without replicate regions");
    // Hoist candidate was already visited, no need to hoist.
    if (!Visited.insert(HoistCandidate).second)
      return nullptr;

    // The candidate is outside the loop region or is a header phi; it
    // dominates FOR's users without hoisting.
    if (!EnclosingLoopRegion || isa<VPHeaderPHIRecipe>(HoistCandidate))
      return nullptr;

    // If we reached a recipe that dominates HoistPoint, we don't need to
    // hoist the recipe.
    if (VPDT.properlyDominates(HoistCandidate, HoistPoint))
      return nullptr;
    return HoistCandidate;
  };
  auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
    // Avoid hoisting candidates with side-effects, as we do not yet analyze
    // associated dependencies.
    return !HoistCandidate->mayHaveSideEffects();
  };

  if (!NeedsHoisting(Previous->getVPSingleValue()))
    return true;

  // Recursively try to hoist Previous and its operands before all users of
  // FOR.
  HoistCandidates.push_back(Previous);

  for (unsigned I = 0; I != HoistCandidates.size(); ++I) {
    VPRecipeBase *Current = HoistCandidates[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");
    if (!CanHoist(Current))
      return false;

    for (VPValue *Op : Current->operands()) {
      // If we reach FOR, it means the original Previous depends on some other
      // recurrence that in turn depends on FOR. If that is the case, we would
      // also need to hoist recipes involving the other FOR, which may break
      // dependencies.
      if (Op == FOR)
        return false;

      if (auto *R = NeedsHoisting(Op))
        HoistCandidates.push_back(R);
    }
  }

  // Order recipes to hoist by dominance so earlier instructions are processed
  // first.
  sort(HoistCandidates, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *HoistCandidate : HoistCandidates) {
    HoistCandidate->moveBefore(*HoistPoint->getParent(),
                               HoistPoint->getIterator());
  }

  return true;
}

bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &LoopBuilder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntry()->getEntryBasicBlock()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
    // to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT) &&
        !hoistPreviousBeforeFORUsers(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      LoopBuilder.setInsertPoint(InsertBlock,
                                 std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                                 {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);
  }
  return true;
}

void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    for (VPUser *U : collectUsersRecursively(PhiR))
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }
  }
}

/// Move loop-invariant recipes out of the vector loop region in \p Plan.
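///
/// For example (a sketch), a recipe computing a loop-invariant value from
/// live-ins, such as
///   %inv = mul %a, %b
/// with %a and %b defined outside the loop region, is moved to the vector
/// preheader.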
static void licm(VPlan &Plan) {
  VPBasicBlock *Preheader = Plan.getVectorPreheader();

  // Return true if we do not know how to (mechanically) hoist a given recipe
  // out of a loop region. Does not address legality concerns such as aliasing
  // or speculation safety.
  auto CannotHoistRecipe = [](VPRecipeBase &R) {
    // Allocas cannot be hoisted.
    auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
    return RepR && RepR->getOpcode() == Instruction::Alloca;
  };

1287   // Hoist any loop-invariant recipes from the vector loop region to the
1288   // preheader. Perform a shallow traversal of the vector loop region to
1289   // exclude recipes in replicate regions.
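  // E.g. a recipe computing `%inv = mul %a, %b`, where %a and %b are defined
  // outside the region, has no side effects and reads no memory, so it is
  // moved to the preheader and executed only once.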
1290   VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1291   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1292            vp_depth_first_shallow(LoopRegion->getEntry()))) {
1293     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
1294       if (CannotHoistRecipe(R))
1295         continue;
1296       // TODO: Relax checks in the future, e.g. we could also hoist reads, if
1297       // their memory location is not modified in the vector loop.
1298       if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi() ||
1299           any_of(R.operands(), [](VPValue *Op) {
1300             return !Op->isDefinedOutsideLoopRegions();
1301           }))
1302         continue;
1303       R.moveBefore(*Preheader, Preheader->end());
1304     }
1305   }
1306 }
1307 
1308 void VPlanTransforms::truncateToMinimalBitwidths(
1309     VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs) {
1310 #ifndef NDEBUG
1311   // Count the processed recipes and cross-check the count later with MinBWs
1312   // size, to make sure all entries in MinBWs have been handled.
1313   unsigned NumProcessedRecipes = 0;
1314 #endif
1315   // Keep track of created truncates, so they can be re-used. Note that we
1316   // cannot use RAUW after creating a new truncate, as this could make
1317   // other uses have different types for their operands, making them invalidly
1318   // typed.
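  // E.g. after creating `%t = trunc %x`, a full RAUW of %x would make %t use
  // itself as operand and would hand the narrowed value to users still
  // expecting %x's original type.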
1319   DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
1320   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1321   VPTypeAnalysis TypeInfo(CanonicalIVType);
1322   VPBasicBlock *PH = Plan.getVectorPreheader();
1323   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1324            vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
1325     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
1326       if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
1327                VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
1328         continue;
1329 
1330       VPValue *ResultVPV = R.getVPSingleValue();
1331       auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
1332       unsigned NewResSizeInBits = MinBWs.lookup(UI);
1333       if (!NewResSizeInBits)
1334         continue;
1335 
1336 #ifndef NDEBUG
1337       NumProcessedRecipes++;
1338 #endif
1339       // If the value wasn't vectorized, we must maintain the original scalar
1340       // type. Skip those here, after incrementing NumProcessedRecipes. Also
1341       // skip casts which do not need to be handled explicitly here, as
1342       // redundant casts will be removed during recipe simplification.
1343       if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
1344 #ifndef NDEBUG
1345         // If any of the operands is a live-in that is not used by
1346         // VPWidenRecipe or VPWidenSelectRecipe but is in MinBWs, make sure
1347         // it is counted as processed as well. As MinBWs is currently
1348         // constructed, there is no information about whether recipes are
1349         // widened or replicated, and in case they are replicated the
1350         // operands are not truncated. Counting them here ensures we do not
         // miss any recipes in MinBWs.
1351         // TODO: Remove once the analysis is done on VPlan.
1352         for (VPValue *Op : R.operands()) {
1353           if (!Op->isLiveIn())
1354             continue;
1355           auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
1356           if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
1357               none_of(Op->users(),
1358                       IsaPred<VPWidenRecipe, VPWidenSelectRecipe>)) {
1359             // Add an entry to ProcessedTruncs to avoid counting the same
1360             // operand multiple times.
1361             ProcessedTruncs[Op] = nullptr;
1362             NumProcessedRecipes += 1;
1363           }
1364         }
1365 #endif
1366         continue;
1367       }
1368 
1369       Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
1370       unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
1371       assert(OldResTy->isIntegerTy() && "only integer types supported");
1372       (void)OldResSizeInBits;
1373 
1374       LLVMContext &Ctx = CanonicalIVType->getContext();
1375       auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);
1376 
1377       // Any wrapping introduced by shrinking this operation shouldn't be
1378       // considered undefined behavior. So, we can't unconditionally copy
1379       // arithmetic wrapping flags to VPW.
1380       if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
1381         VPW->dropPoisonGeneratingFlags();
1382 
1383       using namespace llvm::VPlanPatternMatch;
1384       if (OldResSizeInBits != NewResSizeInBits &&
1385           !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
1386         // Extend result to original width.
1387         auto *Ext =
1388             new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
1389         Ext->insertAfter(&R);
1390         ResultVPV->replaceAllUsesWith(Ext);
1391         Ext->setOperand(0, ResultVPV);
1392         assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
1393       } else {
1394         assert(
1395             match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
1396             "Only ICmps should not need extending the result.");
1397       }
1398 
1399       assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
1400       if (isa<VPWidenLoadRecipe>(&R))
1401         continue;
1402 
1403       // Shrink operands by introducing truncates as needed.
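      // E.g. if MinBWs says `%r = add i32 %a, %b` only needs 8 bits, %a and
      // %b are truncated to i8 here, the add executes in i8, and the
      // zero-extend created above widens %r back to i32.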
1404       unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
1405       for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
1406         auto *Op = R.getOperand(Idx);
1407         unsigned OpSizeInBits =
1408             TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
1409         if (OpSizeInBits == NewResSizeInBits)
1410           continue;
1411         assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
1412         auto [ProcessedIter, IterIsEmpty] =
1413             ProcessedTruncs.insert({Op, nullptr});
1414         VPWidenCastRecipe *NewOp =
1415             IterIsEmpty
1416                 ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
1417                 : ProcessedIter->second;
1418         R.setOperand(Idx, NewOp);
1419         if (!IterIsEmpty)
1420           continue;
1421         ProcessedIter->second = NewOp;
1422         if (!Op->isLiveIn()) {
1423           NewOp->insertBefore(&R);
1424         } else {
1425           PH->appendRecipe(NewOp);
1426 #ifndef NDEBUG
1427           auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
1428           bool IsContained = MinBWs.contains(OpInst);
1429           NumProcessedRecipes += IsContained;
1430 #endif
1431         }
1432       }
1433 
1434     }
1435   }
1436 
1437   assert(MinBWs.size() == NumProcessedRecipes &&
1438          "some entries in MinBWs haven't been processed");
1439 }
1440 
1441 void VPlanTransforms::optimize(VPlan &Plan) {
1442   removeRedundantCanonicalIVs(Plan);
1443   removeRedundantInductionCasts(Plan);
1444 
1445   simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
1446   removeDeadRecipes(Plan);
1447   legalizeAndOptimizeInductions(Plan);
1448   removeRedundantExpandSCEVRecipes(Plan);
1449   simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
1450   removeDeadRecipes(Plan);
1451 
1452   createAndOptimizeReplicateRegions(Plan);
1453   mergeBlocksIntoPredecessors(Plan);
1454   licm(Plan);
1455 }
1456 
1457 // Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
1458 // the loop terminator with a branch-on-cond recipe with the negated
1459 // active-lane-mask as operand. Note that this turns the loop into an
1460 // uncountable one. Only the existing terminator is replaced; all other existing
1461 // recipes/users remain unchanged, except for poison-generating flags being
1462 // dropped from the canonical IV increment. Return the created
1463 // VPActiveLaneMaskPHIRecipe.
1464 //
1465 // The function uses the following definitions:
1466 //
1467 //  %TripCount = DataWithControlFlowWithoutRuntimeCheck ?
1468 //    calculate-trip-count-minus-VF (original TC) : original TC
1469 //  %IncrementValue = DataWithControlFlowWithoutRuntimeCheck ?
1470 //     CanonicalIVPhi : CanonicalIVIncrement
1471 //  %StartV is the canonical induction start value.
1472 //
1473 // The function adds the following recipes:
1474 //
1475 // vector.ph:
1476 //   %TripCount = calculate-trip-count-minus-VF (original TC)
1477 //       [if DataWithControlFlowWithoutRuntimeCheck]
1478 //   %EntryInc = canonical-iv-increment-for-part %StartV
1479 //   %EntryALM = active-lane-mask %EntryInc, %TripCount
1480 //
1481 // vector.body:
1482 //   ...
1483 //   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
1484 //   ...
1485 //   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
1486 //   %ALM = active-lane-mask %InLoopInc, %TripCount
1487 //   %Negated = Not %ALM
1488 //   branch-on-cond %Negated
1489 //
1490 static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
1491     VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
1492   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
1493   VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
1494   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1495   VPValue *StartV = CanonicalIVPHI->getStartValue();
1496 
1497   auto *CanonicalIVIncrement =
1498       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1499   // TODO: Check if dropping the flags is needed if
1500   // !DataAndControlFlowWithoutRuntimeCheck.
1501   CanonicalIVIncrement->dropPoisonGeneratingFlags();
1502   DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
1503   // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
1504   // we have to take unrolling into account. Each part needs to start at
1505   //   Part * VF
1506   auto *VecPreheader = Plan.getVectorPreheader();
1507   VPBuilder Builder(VecPreheader);
1508 
1509   // Create the ActiveLaneMask instruction using the correct start values.
1510   VPValue *TC = Plan.getTripCount();
1511 
1512   VPValue *TripCount, *IncrementValue;
1513   if (!DataAndControlFlowWithoutRuntimeCheck) {
1514     // When the loop is guarded by a runtime overflow check for the loop
1515     // induction variable increment by VF, we can increment the value before
1516     // the get.active.lane.mask intrinsic and use the unmodified trip count.
1517     IncrementValue = CanonicalIVIncrement;
1518     TripCount = TC;
1519   } else {
1520     // When avoiding a runtime check, the active.lane.mask inside the loop
1521     // uses a modified trip count and the induction variable increment is
1522     // done after the active.lane.mask intrinsic is called.
1523     IncrementValue = CanonicalIVPHI;
1524     TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
1525                                      {TC}, DL);
1526   }
1527   auto *EntryIncrement = Builder.createOverflowingOp(
1528       VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
1529       "index.part.next");
1530 
1531   // Create the active lane mask instruction in the VPlan preheader.
1532   auto *EntryALM =
1533       Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
1534                            DL, "active.lane.mask.entry");
1535 
1536   // Now create the ActiveLaneMaskPhi recipe in the main loop using the
1537   // preheader ActiveLaneMask instruction.
1538   auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
1539   LaneMaskPhi->insertAfter(CanonicalIVPHI);
1540 
1541   // Create the active lane mask for the next iteration of the loop before the
1542   // original terminator.
1543   VPRecipeBase *OriginalTerminator = EB->getTerminator();
1544   Builder.setInsertPoint(OriginalTerminator);
1545   auto *InLoopIncrement =
1546       Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
1547                                   {IncrementValue}, {false, false}, DL);
1548   auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
1549                                    {InLoopIncrement, TripCount}, DL,
1550                                    "active.lane.mask.next");
1551   LaneMaskPhi->addOperand(ALM);
1552 
1553   // Replace the original terminator with BranchOnCond. We have to invert the
1554   // mask here because a true condition means jumping to the exit block.
1555   auto *NotMask = Builder.createNot(ALM, DL);
1556   Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
1557   OriginalTerminator->eraseFromParent();
1558   return LaneMaskPhi;
1559 }
1560 
1561 /// Collect all VPValues representing a header mask through the (ICMP_ULE,
1562 /// WideCanonicalIV, backedge-taken-count) pattern.
1563 /// TODO: Introduce explicit recipe for header-mask instead of searching
1564 /// for the header-mask pattern manually.
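/// A header mask has (roughly) the form:
///   %wide.iv = WIDEN-CANONICAL-INDUCTION %canonical.iv
///   %mask = icmp ule %wide.iv, %backedge.taken.count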
1565 static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
1566   SmallVector<VPValue *> WideCanonicalIVs;
1567   auto *FoundWidenCanonicalIVUser =
1568       find_if(Plan.getCanonicalIV()->users(),
1569               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1570   assert(count_if(Plan.getCanonicalIV()->users(),
1571                   [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
1572              1 &&
1573          "Must have at most one VPWideCanonicalIVRecipe");
1574   if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
1575     auto *WideCanonicalIV =
1576         cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1577     WideCanonicalIVs.push_back(WideCanonicalIV);
1578   }
1579 
1580   // Also include VPWidenIntOrFpInductionRecipes that represent a widened
1581   // version of the canonical induction.
1582   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1583   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
1584     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
1585     if (WidenOriginalIV && WidenOriginalIV->isCanonical())
1586       WideCanonicalIVs.push_back(WidenOriginalIV);
1587   }
1588 
1589   // Walk users of wide canonical IVs and collect all compares of the form
1590   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
1591   SmallVector<VPValue *> HeaderMasks;
1592   for (auto *Wide : WideCanonicalIVs) {
1593     for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
1594       auto *HeaderMask = dyn_cast<VPInstruction>(U);
1595       if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
1596         continue;
1597 
1598       assert(HeaderMask->getOperand(0) == Wide &&
1599              "WidenCanonicalIV must be the first operand of the compare");
1600       HeaderMasks.push_back(HeaderMask);
1601     }
1602   }
1603   return HeaderMasks;
1604 }
1605 
1606 void VPlanTransforms::addActiveLaneMask(
1607     VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
1608     bool DataAndControlFlowWithoutRuntimeCheck) {
1609   assert((!DataAndControlFlowWithoutRuntimeCheck ||
1610           UseActiveLaneMaskForControlFlow) &&
1611          "DataAndControlFlowWithoutRuntimeCheck implies "
1612          "UseActiveLaneMaskForControlFlow");
1613 
1614   auto *FoundWidenCanonicalIVUser =
1615       find_if(Plan.getCanonicalIV()->users(),
1616               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1617   assert(FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end() &&
1618          "Must have widened canonical IV when tail folding!");
1619   auto *WideCanonicalIV =
1620       cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1621   VPSingleDefRecipe *LaneMask;
1622   if (UseActiveLaneMaskForControlFlow) {
1623     LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
1624         Plan, DataAndControlFlowWithoutRuntimeCheck);
1625   } else {
1626     VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
1627     LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
1628                               {WideCanonicalIV, Plan.getTripCount()}, nullptr,
1629                               "active.lane.mask");
1630   }
1631 
1632   // Walk users of WideCanonicalIV and replace all compares of the form
1633   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
1634   // active-lane-mask.
1635   for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
1636     HeaderMask->replaceAllUsesWith(LaneMask);
1637 }
1638 
1639 /// Try to convert \p CurRecipe to a corresponding EVL-based recipe. Returns
1640 /// nullptr if no EVL-based recipe could be created.
1641 /// \p HeaderMask  Header Mask.
1642 /// \p CurRecipe   Recipe to be transformed.
1643 /// \p TypeInfo    VPlan-based type analysis.
1644 /// \p AllOneMask  The vector mask parameter of vector-predication intrinsics.
1645 /// \p EVL         The explicit vector length parameter of vector-predication
1646 /// intrinsics.
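/// E.g. a widened load
///   WIDEN ir<%l> = load %addr, %header.mask
/// becomes (roughly) the VP-intrinsic-based recipe
///   WIDEN ir<%l> = vp.load %addr, all-true, %EVL
/// where lanes at or past %EVL are disabled rather than masked individually.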
1647 static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
1648                                      VPRecipeBase &CurRecipe,
1649                                      VPTypeAnalysis &TypeInfo,
1650                                      VPValue &AllOneMask, VPValue &EVL) {
1651   using namespace llvm::VPlanPatternMatch;
1652   auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
1653     assert(OrigMask && "Unmasked recipe when folding tail");
1654     return HeaderMask == OrigMask ? nullptr : OrigMask;
1655   };
1656 
1657   return TypeSwitch<VPRecipeBase *, VPRecipeBase *>(&CurRecipe)
1658       .Case<VPWidenLoadRecipe>([&](VPWidenLoadRecipe *L) {
1659         VPValue *NewMask = GetNewMask(L->getMask());
1660         return new VPWidenLoadEVLRecipe(*L, EVL, NewMask);
1661       })
1662       .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
1663         VPValue *NewMask = GetNewMask(S->getMask());
1664         return new VPWidenStoreEVLRecipe(*S, EVL, NewMask);
1665       })
1666       .Case<VPWidenRecipe>([&](VPWidenRecipe *W) -> VPRecipeBase * {
1667         unsigned Opcode = W->getOpcode();
1668         if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
1669           return nullptr;
1670         return new VPWidenEVLRecipe(*W, EVL);
1671       })
1672       .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
1673         VPValue *NewMask = GetNewMask(Red->getCondOp());
1674         return new VPReductionEVLRecipe(*Red, EVL, NewMask);
1675       })
1676       .Case<VPWidenIntrinsicRecipe, VPWidenCastRecipe>(
1677           [&](auto *CR) -> VPRecipeBase * {
1678             Intrinsic::ID VPID;
1679             if (auto *CallR = dyn_cast<VPWidenIntrinsicRecipe>(CR)) {
1680               VPID =
1681                   VPIntrinsic::getForIntrinsic(CallR->getVectorIntrinsicID());
1682             } else {
1683               auto *CastR = cast<VPWidenCastRecipe>(CR);
1684               VPID = VPIntrinsic::getForOpcode(CastR->getOpcode());
1685             }
1686 
1687             // Not all intrinsics have a corresponding VP intrinsic.
1688             if (VPID == Intrinsic::not_intrinsic)
1689               return nullptr;
1690             assert(VPIntrinsic::getMaskParamPos(VPID) &&
1691                    VPIntrinsic::getVectorLengthParamPos(VPID) &&
1692                    "Expected VP intrinsic to have mask and EVL");
1693 
1694             SmallVector<VPValue *> Ops(CR->operands());
1695             Ops.push_back(&AllOneMask);
1696             Ops.push_back(&EVL);
1697             return new VPWidenIntrinsicRecipe(
1698                 VPID, Ops, TypeInfo.inferScalarType(CR), CR->getDebugLoc());
1699           })
1700       .Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
1701         SmallVector<VPValue *> Ops(Sel->operands());
1702         Ops.push_back(&EVL);
1703         return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
1704                                           TypeInfo.inferScalarType(Sel),
1705                                           Sel->getDebugLoc());
1706       })
1707       .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
1708         VPValue *LHS, *RHS;
1709         // Transform select with a header mask condition
1710         //   select(header_mask, LHS, RHS)
1711         // into a vector-predication merge:
1712         //   vp.merge(all-true, LHS, RHS, EVL)
1713         if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
1714                                  m_VPValue(RHS))))
1715           return nullptr;
1716         // Use all true as the condition because this transformation is
1717         // limited to selects whose condition is a header mask.
1718         return new VPWidenIntrinsicRecipe(
1719             Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
1720             TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
1721       })
1722       .Default([&](VPRecipeBase *R) { return nullptr; });
1723 }
1724 
1725 /// Replace recipes with their EVL variants.
1726 static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
1727   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1728   VPTypeAnalysis TypeInfo(CanonicalIVType);
1729   LLVMContext &Ctx = CanonicalIVType->getContext();
1730   VPValue *AllOneMask = Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));
1731 
1732   for (VPUser *U : to_vector(Plan.getVF().users())) {
1733     if (auto *R = dyn_cast<VPReverseVectorPointerRecipe>(U))
1734       R->setOperand(1, &EVL);
1735   }
1736 
1737   SmallVector<VPRecipeBase *> ToErase;
1738 
1739   for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
1740     for (VPUser *U : collectUsersRecursively(HeaderMask)) {
1741       auto *CurRecipe = cast<VPRecipeBase>(U);
1742       VPRecipeBase *EVLRecipe =
1743           createEVLRecipe(HeaderMask, *CurRecipe, TypeInfo, *AllOneMask, EVL);
1744       if (!EVLRecipe)
1745         continue;
1746 
1747       [[maybe_unused]] unsigned NumDefVal = EVLRecipe->getNumDefinedValues();
1748       assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
1749              "New recipe must define the same number of values as the "
1750              "original.");
1751       assert(
1752           NumDefVal <= 1 &&
1753           "Only supports recipes with a single definition or without users.");
1754       EVLRecipe->insertBefore(CurRecipe);
1755       if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(EVLRecipe)) {
1756         VPValue *CurVPV = CurRecipe->getVPSingleValue();
1757         CurVPV->replaceAllUsesWith(EVLRecipe->getVPSingleValue());
1758       }
1759       // Defer erasing recipes till the end so that we don't invalidate the
1760       // VPTypeAnalysis cache.
1761       ToErase.push_back(CurRecipe);
1762     }
1763   }
1764 
1765   for (VPRecipeBase *R : reverse(ToErase)) {
1766     SmallVector<VPValue *> PossiblyDead(R->operands());
1767     R->eraseFromParent();
1768     for (VPValue *Op : PossiblyDead)
1769       recursivelyDeleteDeadRecipes(Op);
1770   }
1771 }
1772 
1773 /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and
1774 /// replace all uses of VPCanonicalIVPHIRecipe, except the canonical IV
1775 /// increment, with a VPEVLBasedIVPHIRecipe. The VPCanonicalIVPHIRecipe
1776 /// is used only for counting loop iterations after this transformation.
1777 ///
1778 /// The function uses the following definitions:
1779 ///  %StartV is the canonical induction start value.
1780 ///
1781 /// The function adds the following recipes:
1782 ///
1783 /// vector.ph:
1784 /// ...
1785 ///
1786 /// vector.body:
1787 /// ...
1788 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1789 ///                                               [ %NextEVLIV, %vector.body ]
1790 /// %AVL = sub original TC, %EVLPhi
1791 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
1792 /// ...
1793 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1794 /// ...
1795 ///
1796 /// If MaxSafeElements is provided, the function adds the following recipes:
1797 /// vector.ph:
1798 /// ...
1799 ///
1800 /// vector.body:
1801 /// ...
1802 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1803 ///                                               [ %NextEVLIV, %vector.body ]
1804 /// %AVL = sub original TC, %EVLPhi
1805 /// %cmp = cmp ult %AVL, MaxSafeElements
1806 /// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
1807 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
1808 /// ...
1809 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1810 /// ...
1811 ///
1812 bool VPlanTransforms::tryAddExplicitVectorLength(
1813     VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
1814   VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1815   // The transform updates all users of inductions to work based on EVL, instead
1816   // of the VF directly. At the moment, widened inductions cannot be updated, so
1817   // bail out if the plan contains any.
1818   bool ContainsWidenInductions = any_of(
1819       Header->phis(),
1820       IsaPred<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>);
1821   if (ContainsWidenInductions)
1822     return false;
1823 
1824   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1825   VPValue *StartV = CanonicalIVPHI->getStartValue();
1826 
1827   // Create the ExplicitVectorLengthPhi recipe in the main loop.
1828   auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
1829   EVLPhi->insertAfter(CanonicalIVPHI);
1830   VPBuilder Builder(Header, Header->getFirstNonPhi());
1831   // Compute original TC - IV as the AVL (application vector length).
1832   VPValue *AVL = Builder.createNaryOp(
1833       Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
1834   if (MaxSafeElements) {
1835     // Support for MaxSafeDist for correct loop emission.
1836     VPValue *AVLSafe = Plan.getOrAddLiveIn(
1837         ConstantInt::get(CanonicalIVPHI->getScalarType(), *MaxSafeElements));
1838     VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
1839     AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc(), "safe_avl");
1840   }
1841   auto *VPEVL = Builder.createNaryOp(VPInstruction::ExplicitVectorLength, AVL,
1842                                      DebugLoc());
1843 
1844   auto *CanonicalIVIncrement =
1845       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1846   VPSingleDefRecipe *OpVPEVL = VPEVL;
1847   if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
1848       IVSize != 32) {
1849     OpVPEVL = new VPScalarCastRecipe(
1850         IVSize < 32 ? Instruction::Trunc : Instruction::ZExt, OpVPEVL,
1851         CanonicalIVPHI->getScalarType(), CanonicalIVIncrement->getDebugLoc());
1852     OpVPEVL->insertBefore(CanonicalIVIncrement);
1853   }
1854   auto *NextEVLIV =
1855       new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
1856                         {CanonicalIVIncrement->hasNoUnsignedWrap(),
1857                          CanonicalIVIncrement->hasNoSignedWrap()},
1858                         CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
1859   NextEVLIV->insertBefore(CanonicalIVIncrement);
1860   EVLPhi->addOperand(NextEVLIV);
1861 
1862   transformRecipestoEVLRecipes(Plan, *VPEVL);
1863 
1864   // Replace all uses of VPCanonicalIVPHIRecipe by
1865   // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
1866   CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
1867   CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
1868   // TODO: support unroll factor > 1.
1869   Plan.setUF(1);
1870   return true;
1871 }
1872 
1873 void VPlanTransforms::dropPoisonGeneratingRecipes(
1874     VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1875   // Collect recipes in the backward slice of `Root` that may generate a poison
1876   // value that is used after vectorization.
1877   SmallPtrSet<VPRecipeBase *, 16> Visited;
1878   auto CollectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1879     SmallVector<VPRecipeBase *, 16> Worklist;
1880     Worklist.push_back(Root);
1881 
1882     // Traverse the backward slice of Root through its use-def chain.
1883     while (!Worklist.empty()) {
1884       VPRecipeBase *CurRec = Worklist.pop_back_val();
1885 
1886       if (!Visited.insert(CurRec).second)
1887         continue;
1888 
1889       // Prune search if we find another recipe generating a widen memory
1890       // instruction. Widen memory instructions involved in address computation
1891       // will lead to gather/scatter instructions, which don't need to be
1892       // handled.
1893       if (isa<VPWidenMemoryRecipe, VPInterleaveRecipe, VPScalarIVStepsRecipe,
1894               VPHeaderPHIRecipe>(CurRec))
1895         continue;
1896 
1897       // This recipe contributes to the address computation of a widen
1898       // load/store. If the underlying instruction has poison-generating flags,
1899       // drop them directly.
1900       if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1901         VPValue *A, *B;
1902         using namespace llvm::VPlanPatternMatch;
1903         // Dropping disjoint from an OR may yield incorrect results, as some
1904         // analysis may have converted it to an Add implicitly (e.g. SCEV used
1905         // for dependence analysis). Instead, replace it with an equivalent Add.
1906         // This is possible as all users of the disjoint OR only access lanes
1907         // where the operands are disjoint or poison otherwise.
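        // E.g. `%p = or disjoint %a, %b` feeding an address is rewritten
        // (roughly) to `%p = add %a, %b`, matching what SCEV-based
        // dependence analysis assumed.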
1908         if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
1909             RecWithFlags->isDisjoint()) {
1910           VPBuilder Builder(RecWithFlags);
1911           VPInstruction *New = Builder.createOverflowingOp(
1912               Instruction::Add, {A, B}, {false, false},
1913               RecWithFlags->getDebugLoc());
1914           New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
1915           RecWithFlags->replaceAllUsesWith(New);
1916           RecWithFlags->eraseFromParent();
1917           CurRec = New;
1918         } else
1919           RecWithFlags->dropPoisonGeneratingFlags();
1920       } else {
1921         Instruction *Instr = dyn_cast_or_null<Instruction>(
1922             CurRec->getVPSingleValue()->getUnderlyingValue());
1923         (void)Instr;
1924         assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1925                "found instruction with poison generating flags not covered by "
1926                "VPRecipeWithIRFlags");
1927       }
1928 
1929       // Add new definitions to the worklist.
1930       for (VPValue *Operand : CurRec->operands())
1931         if (VPRecipeBase *OpDef = Operand->getDefiningRecipe())
1932           Worklist.push_back(OpDef);
1933     }
1934   });
1935 
1936   // Traverse all the recipes in the VPlan and collect the poison-generating
1937   // recipes in the backward slice starting at the address of a VPWidenMemoryRecipe or
1938   // VPInterleaveRecipe.
1939   auto Iter = vp_depth_first_deep(Plan.getEntry());
1940   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1941     for (VPRecipeBase &Recipe : *VPBB) {
1942       if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1943         Instruction &UnderlyingInstr = WidenRec->getIngredient();
1944         VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1945         if (AddrDef && WidenRec->isConsecutive() &&
1946             BlockNeedsPredication(UnderlyingInstr.getParent()))
1947           CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1948       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1949         VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1950         if (AddrDef) {
1951           // Check if any member of the interleave group needs predication.
1952           const InterleaveGroup<Instruction> *InterGroup =
1953               InterleaveRec->getInterleaveGroup();
1954           bool NeedPredication = false;
1955           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1956                I < NumMembers; ++I) {
1957             Instruction *Member = InterGroup->getMember(I);
1958             if (Member)
1959               NeedPredication |= BlockNeedsPredication(Member->getParent());
1960           }
1961 
1962           if (NeedPredication)
1963             CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1964         }
1965       }
1966     }
1967   }
1968 }
1969 
1970 void VPlanTransforms::createInterleaveGroups(
1971     VPlan &Plan,
1972     const SmallPtrSetImpl<const InterleaveGroup<Instruction> *>
1973         &InterleaveGroups,
1974     VPRecipeBuilder &RecipeBuilder, bool ScalarEpilogueAllowed) {
1975   if (InterleaveGroups.empty())
1976     return;
1977 
1978   // Interleave memory: for each Interleave Group we marked earlier as relevant
1979   // for this VPlan, replace the Recipes widening its memory instructions with a
1980   // single VPInterleaveRecipe at its insertion point.
1981   VPDominatorTree VPDT;
1982   VPDT.recalculate(Plan);
1983   for (const auto *IG : InterleaveGroups) {
1984     SmallVector<VPValue *, 4> StoredValues;
1985     for (unsigned i = 0; i < IG->getFactor(); ++i)
1986       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
1987         auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
1988         StoredValues.push_back(StoreR->getStoredValue());
1989       }
1990 
1991     bool NeedsMaskForGaps =
1992         IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;
1993 
1994     Instruction *IRInsertPos = IG->getInsertPos();
1995     auto *InsertPos =
1996         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IRInsertPos));
1997 
1998     // Get or create the start address for the interleave group.
1999     auto *Start =
2000         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getMember(0)));
2001     VPValue *Addr = Start->getAddr();
2002     VPRecipeBase *AddrDef = Addr->getDefiningRecipe();
2003     if (AddrDef && !VPDT.properlyDominates(AddrDef, InsertPos)) {
2004       // TODO: Hoist Addr's defining recipe (and any operands as needed) to
2005       // InsertPos or sink loads above zero members to join it.
2006       bool InBounds = false;
2007       if (auto *Gep = dyn_cast<GetElementPtrInst>(
2008               getLoadStorePointerOperand(IRInsertPos)->stripPointerCasts()))
2009         InBounds = Gep->isInBounds();
2010 
2011       // We cannot re-use the address of member zero because it does not
2012       // dominate the insert position. Instead, use the address of the insert
2013       // position and create a PtrAdd adjusting it to the address of member
2014       // zero.
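      // E.g. if the insert position is member 2 of a group of i32 accesses,
      // member zero's address is computed (roughly) as
      //   %addr0 = ptradd %addr.insertpos, -8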
2015       assert(IG->getIndex(IRInsertPos) != 0 &&
2016              "index of insert position shouldn't be zero");
2017       auto &DL = IRInsertPos->getDataLayout();
2018       APInt Offset(32,
2019                    DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
2020                        IG->getIndex(IRInsertPos),
2021                    /*IsSigned=*/true);
2022       VPValue *OffsetVPV = Plan.getOrAddLiveIn(
2023           ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset));
2024       VPBuilder B(InsertPos);
2025       Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV)
2026                       : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV);
2027     }
2028     auto *VPIG = new VPInterleaveRecipe(IG, Addr, StoredValues,
2029                                         InsertPos->getMask(), NeedsMaskForGaps);
2030     VPIG->insertBefore(InsertPos);
2031 
2032     unsigned J = 0;
2033     for (unsigned i = 0; i < IG->getFactor(); ++i)
2034       if (Instruction *Member = IG->getMember(i)) {
2035         VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
2036         if (!Member->getType()->isVoidTy()) {
2037           VPValue *OriginalV = MemberR->getVPSingleValue();
2038           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
2039           J++;
2040         }
2041         MemberR->eraseFromParent();
2042       }
2043   }
2044 }
2045 
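// Lower abstract header-phi recipes to concrete scalar phis, e.g. (roughly):
//   EMIT %index = CANONICAL-INDUCTION ir<0>, %index.next
// becomes
//   SCALAR-PHI %index = phi [ ir<0> ], [ %index.next ]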
2046 void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
2047   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
2048            vp_depth_first_deep(Plan.getEntry()))) {
2049     for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
2050       if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
2051         continue;
2052       auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
2053       StringRef Name =
2054           isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
2055       auto *ScalarR =
2056           new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
2057                                 PhiR->getDebugLoc(), Name);
2058       ScalarR->insertBefore(PhiR);
2059       PhiR->replaceAllUsesWith(ScalarR);
2060       PhiR->eraseFromParent();
2061     }
2062   }
2063 }
2064 
2065 bool VPlanTransforms::handleUncountableEarlyExit(
2066     VPlan &Plan, ScalarEvolution &SE, Loop *OrigLoop,
2067     BasicBlock *UncountableExitingBlock, VPRecipeBuilder &RecipeBuilder) {
2068   VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
2069   auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
2070   VPBuilder Builder(LatchVPBB->getTerminator());
2071   auto *MiddleVPBB = Plan.getMiddleBlock();
2072   VPValue *IsEarlyExitTaken = nullptr;
2073 
2074   // Process the uncountable exiting block. Update IsEarlyExitTaken, which
2075   // tracks if the uncountable early exit has been taken. Also split the middle
2076   // block and have it conditionally branch to the early exit block if
2077   // EarlyExitTaken.
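  // The split (roughly) yields:
  //   middle.split:
  //     branch-on-cond %IsEarlyExitTaken ; true -> early exit block
  //   middle.block:
  //     ... original middle block ...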
2078   auto *EarlyExitingBranch =
2079       cast<BranchInst>(UncountableExitingBlock->getTerminator());
2080   BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
2081   BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);
2082 
2083   // The early exit block may or may not be the same as the "countable" exit
2084   // block. Create a new VPIRBB for the early exit block in case it is distinct
2085   // from the countable exit block.
2086   // TODO: Introduce both exit blocks during VPlan skeleton construction.
2087   VPIRBasicBlock *VPEarlyExitBlock;
2088   if (OrigLoop->getUniqueExitBlock()) {
2089     VPEarlyExitBlock = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]);
2090   } else {
2091     VPEarlyExitBlock = Plan.createVPIRBasicBlock(
2092         !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
2093   }
2094 
2095   VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
2096       OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
2097   auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
2098   IsEarlyExitTaken =
2099       Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});
2100 
2101   VPBasicBlock *NewMiddle = Plan.createVPBasicBlock("middle.split");
2102   VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
2103   VPBlockUtils::connectBlocks(NewMiddle, VPEarlyExitBlock);
2104   NewMiddle->swapSuccessors();
2105 
2106   // Update the exit phis in the early exit block.
2107   VPBuilder MiddleBuilder(NewMiddle);
2108   for (VPRecipeBase &R : *VPEarlyExitBlock) {
2109     auto *ExitIRI = cast<VPIRInstruction>(&R);
2110     auto *ExitPhi = dyn_cast<PHINode>(&ExitIRI->getInstruction());
2111     if (!ExitPhi)
2112       break;
2113 
2114     VPValue *IncomingFromEarlyExit = RecipeBuilder.getVPValueOrAddLiveIn(
2115         ExitPhi->getIncomingValueForBlock(UncountableExitingBlock));
2116     // The incoming value from the early exit must be a live-in for now.
2117     if (!IncomingFromEarlyExit->isLiveIn())
2118       return false;
2119 
2120     if (OrigLoop->getUniqueExitBlock()) {
2121       // If there's a unique exit block, VPEarlyExitBlock has 2 predecessors
2122       // (MiddleVPBB and NewMiddle). Add the incoming value from MiddleVPBB
2123       // which is coming from the original latch.
2124       VPValue *IncomingFromLatch = RecipeBuilder.getVPValueOrAddLiveIn(
2125           ExitPhi->getIncomingValueForBlock(OrigLoop->getLoopLatch()));
2126       ExitIRI->addOperand(IncomingFromLatch);
2127       ExitIRI->extractLastLaneOfOperand(MiddleBuilder);
2128     }
2129     // Add the incoming value from the early exit.
2130     ExitIRI->addOperand(IncomingFromEarlyExit);
2131   }
2132   MiddleBuilder.createNaryOp(VPInstruction::BranchOnCond, {IsEarlyExitTaken});
2133 
2134   // Replace the condition controlling the non-early exit from the vector loop
2135   // with one exiting if either the original condition of the vector latch is
2136   // true or the early exit has been taken.
2137   auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
2138   assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
2139          "Unexpected terminator");
2140   auto *IsLatchExitTaken =
2141       Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
2142                          LatchExitingBranch->getOperand(1));
2143   auto *AnyExitTaken = Builder.createNaryOp(
2144       Instruction::Or, {IsEarlyExitTaken, IsLatchExitTaken});
2145   Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
2146   LatchExitingBranch->eraseFromParent();
2147   return true;
2148 }
2149