//===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set of utility VPlan to VPlan transformations.
///
//===----------------------------------------------------------------------===//

#include "VPlanTransforms.h"
#include "VPRecipeBuilder.h"
#include "VPlan.h"
#include "VPlanAnalysis.h"
#include "VPlanCFG.h"
#include "VPlanDominatorTree.h"
#include "VPlanPatternMatch.h"
#include "VPlanUtils.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

void VPlanTransforms::VPInstructionsToVPRecipes(
    VPlanPtr &Plan,
    function_ref<const InductionDescriptor *(PHINode *)>
        GetIntOrFpInductionDescriptor,
    ScalarEvolution &SE, const TargetLibraryInfo &TLI) {

  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan->getVectorLoopRegion());
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    // Skip blocks outside the region.
    if (!VPBB->getParent())
      break;
    VPRecipeBase *Term = VPBB->getTerminator();
    auto EndIter = Term ? Term->getIterator() : VPBB->end();
    // Introduce each ingredient into VPlan.
    for (VPRecipeBase &Ingredient :
         make_early_inc_range(make_range(VPBB->begin(), EndIter))) {

      VPValue *VPV = Ingredient.getVPSingleValue();
      Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());

      VPRecipeBase *NewRecipe = nullptr;
      if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
        auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
        const auto *II = GetIntOrFpInductionDescriptor(Phi);
        if (!II)
          continue;

        VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
        VPValue *Step =
            vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
        NewRecipe = new VPWidenIntOrFpInductionRecipe(
            Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
      } else {
        assert(isa<VPInstruction>(&Ingredient) &&
               "only VPInstructions expected here");
        assert(!isa<PHINode>(Inst) && "phis should be handled above");
        // Create VPWidenMemoryRecipe for loads and stores.
        if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
          NewRecipe = new VPWidenLoadRecipe(
              *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
              false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
          NewRecipe = new VPWidenStoreRecipe(
              *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
              nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
              Ingredient.getDebugLoc());
        } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
          NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
        } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
          NewRecipe = new VPWidenIntrinsicRecipe(
              *CI, getVectorIntrinsicIDForCall(CI, &TLI),
              {Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
              CI->getDebugLoc());
        } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
          NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
        } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
          NewRecipe = new VPWidenCastRecipe(
              CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
        } else {
          NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
        }
      }

      NewRecipe->insertBefore(&Ingredient);
      if (NewRecipe->getNumDefinedValues() == 1)
        VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
      else
        assert(NewRecipe->getNumDefinedValues() == 0 &&
               "Only recipes with zero or one defined values expected");
      Ingredient.eraseFromParent();
    }
  }
}

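/// Try to sink recipes feeding replicate regions into those regions, so they
/// are only computed when the corresponding mask is active. Recipes with
/// users outside the target region are duplicated if all such users only use
/// their first lane. Returns true if any recipe was sunk.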
static bool sinkScalarOperands(VPlan &Plan) {
  auto Iter = vp_depth_first_deep(Plan.getEntry());
  bool Changed = false;
  // First, collect the operands of all recipes in replicate blocks as seeds for
  // sinking.
  SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
  for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
    VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
    if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
      continue;
    VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
    if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
      continue;
    for (auto &Recipe : *VPBB) {
      for (VPValue *Op : Recipe.operands())
        if (auto *Def =
                dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
          WorkList.insert(std::make_pair(VPBB, Def));
    }
  }

  bool ScalarVFOnly = Plan.hasScalarVFOnly();
  // Try to sink each replicate or scalar IV steps recipe in the worklist.
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPBasicBlock *SinkTo;
    VPSingleDefRecipe *SinkCandidate;
    std::tie(SinkTo, SinkCandidate) = WorkList[I];
    if (SinkCandidate->getParent() == SinkTo ||
        SinkCandidate->mayHaveSideEffects() ||
        SinkCandidate->mayReadOrWriteMemory())
      continue;
    if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
      if (!ScalarVFOnly && RepR->isUniform())
        continue;
    } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
      continue;

    bool NeedsDuplicating = false;
    // All recipe users of the sink candidate must be in the same block SinkTo,
    // or all users outside of SinkTo must be uniform-after-vectorization,
    // i.e., only their first lane is used. In the latter case, we need to
    // duplicate SinkCandidate.
    auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
                            SinkCandidate](VPUser *U) {
      auto *UI = cast<VPRecipeBase>(U);
      if (UI->getParent() == SinkTo)
        return true;
      NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
      // We only know how to duplicate VPReplicateRecipes for now.
      return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
    };
    if (!all_of(SinkCandidate->users(), CanSinkWithUser))
      continue;

    if (NeedsDuplicating) {
      if (ScalarVFOnly)
        continue;
      Instruction *I = SinkCandidate->getUnderlyingInstr();
      auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
      // TODO: add ".cloned" suffix to name of Clone's VPValue.

      Clone->insertBefore(SinkCandidate);
      SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
      });
    }
    SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
    for (VPValue *Op : SinkCandidate->operands())
      if (auto *Def =
              dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
        WorkList.insert(std::make_pair(SinkTo, Def));
    Changed = true;
  }
  return Changed;
}

/// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
/// the mask.
static VPValue *getPredicatedMask(VPRegionBlock *R) {
  auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
  if (!EntryBB || EntryBB->size() != 1 ||
      !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
    return nullptr;

  return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
}

/// If \p R is a triangle region, return the 'then' block of the triangle.
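/// For example (an illustrative sketch of the matched shape; the entry block
/// branches on a mask and one successor falls through to the exiting block):
///
///      entry
///      /   \
///   then    |
///      \    |
///     exiting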
static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
  auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
  if (EntryBB->getNumSuccessors() != 2)
    return nullptr;

  auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
  auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
  if (!Succ0 || !Succ1)
    return nullptr;

  if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
    return nullptr;
  if (Succ0->getSingleSuccessor() == Succ1)
    return Succ0;
  if (Succ1->getSingleSuccessor() == Succ0)
    return Succ1;
  return nullptr;
}

// Merge replicate regions into their successor region, if a replicate region
// is connected to a successor replicate region with the same predicate by a
// single, empty VPBasicBlock.
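//
// For example (an illustrative sketch, with both regions predicated by the
// same mask %m):
//
//   [pred.store region] -> [empty VPBB] -> [pred.load region]
//
// The recipes of the first region are moved into the 'then' and merge blocks
// of the second, after which the first region is disconnected.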
static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
  SmallPtrSet<VPRegionBlock *, 4> TransformedRegions;

  // Collect replicate regions followed by an empty block, followed by another
  // replicate region with a matching mask, up front. This avoids iterator
  // invalidation issues while merging regions.
  SmallVector<VPRegionBlock *, 8> WorkList;
  for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    if (!Region1->isReplicator())
      continue;
    auto *MiddleBasicBlock =
        dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
    if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
      continue;

    auto *Region2 =
        dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
    if (!Region2 || !Region2->isReplicator())
      continue;

    VPValue *Mask1 = getPredicatedMask(Region1);
    VPValue *Mask2 = getPredicatedMask(Region2);
    if (!Mask1 || Mask1 != Mask2)
      continue;

    assert(Mask1 && Mask2 && "both regions must have conditions");
    WorkList.push_back(Region1);
  }

  // Move recipes from Region1 to its successor region, if both are triangles.
  for (VPRegionBlock *Region1 : WorkList) {
    if (TransformedRegions.contains(Region1))
      continue;
    auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
    auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());

    VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
    VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
    if (!Then1 || !Then2)
      continue;

    // Note: No fusion-preventing memory dependencies are expected in either
    // region. Such dependencies should be rejected during earlier dependence
    // checks, which guarantee accesses can be re-ordered for vectorization.
    //
    // Move recipes to the successor region.
    for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
      ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());

    auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
    auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());

    // Move VPPredInstPHIRecipes from the merge block to the successor region's
    // merge block. Update all users inside the successor region to use the
    // original values.
    for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
      VPValue *PredInst1 =
          cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
      VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
      Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
        return cast<VPRecipeBase>(&U)->getParent() == Then2;
      });

      // Remove phi recipes that are unused after merging the regions.
      if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
        Phi1ToMove.eraseFromParent();
        continue;
      }
      Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
    }

    // Finally, remove the first region.
    for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
      VPBlockUtils::disconnectBlocks(Pred, Region1);
      VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
    }
    VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
    TransformedRegions.insert(Region1);
  }

  return !TransformedRegions.empty();
}

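/// Build a triangular if-then replicate region for \p PredRecipe: a
/// "pred.*.entry" block branching on the recipe's mask, a "pred.*.if" block
/// holding the recipe without its mask operand, and a "pred.*.continue" block
/// with an optional VPPredInstPHIRecipe merging the predicated result for
/// users outside the region.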
static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
                                            VPlan &Plan) {
  Instruction *Instr = PredRecipe->getUnderlyingInstr();
  // Build the triangular if-then region.
  std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
  assert(Instr->getParent() && "Predicated instruction not in any basic block");
  auto *BlockInMask = PredRecipe->getMask();
  auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
  auto *Entry =
      Plan.createVPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);

  // Replace predicated replicate recipe with a replicate recipe without a
  // mask but in the replicate region.
  auto *RecipeWithoutMask = new VPReplicateRecipe(
      PredRecipe->getUnderlyingInstr(),
      make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
      PredRecipe->isUniform());
  auto *Pred =
      Plan.createVPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);

  VPPredInstPHIRecipe *PHIRecipe = nullptr;
  if (PredRecipe->getNumUsers() != 0) {
    PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask,
                                        RecipeWithoutMask->getDebugLoc());
    PredRecipe->replaceAllUsesWith(PHIRecipe);
    PHIRecipe->setOperand(0, RecipeWithoutMask);
  }
  PredRecipe->eraseFromParent();
  auto *Exiting =
      Plan.createVPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
  VPRegionBlock *Region =
      Plan.createVPRegionBlock(Entry, Exiting, RegionName, true);

  // Note: first set Entry as region entry and then connect successors starting
  // from it in order, to propagate the "parent" of each VPBasicBlock.
  VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
  VPBlockUtils::connectBlocks(Pred, Exiting);

  return Region;
}

static void addReplicateRegions(VPlan &Plan) {
  SmallVector<VPReplicateRecipe *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    for (VPRecipeBase &R : *VPBB)
      if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
        if (RepR->isPredicated())
          WorkList.push_back(RepR);
      }
  }

  unsigned BBNum = 0;
  for (VPReplicateRecipe *RepR : WorkList) {
    VPBasicBlock *CurrentBlock = RepR->getParent();
    VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());

    BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
    SplitBlock->setName(
        OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
    // Replace the predicated recipe with a replicate region, inserted on the
    // edge between the split blocks.
    VPBlockBase *Region = createReplicateRegion(RepR, Plan);
    Region->setParent(CurrentBlock->getParent());
    VPBlockUtils::insertOnEdge(CurrentBlock, SplitBlock, Region);
  }
}

/// Remove redundant VPBasicBlocks by merging them into their predecessor if
/// the predecessor has a single successor.
static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
  SmallVector<VPBasicBlock *> WorkList;
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getEntry()))) {
    // Don't fold the blocks in the skeleton of the Plan into their single
    // predecessors for now.
    // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
    if (!VPBB->getParent())
      continue;
    auto *PredVPBB =
        dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
    if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
        isa<VPIRBasicBlock>(PredVPBB))
      continue;
    WorkList.push_back(VPBB);
  }

  for (VPBasicBlock *VPBB : WorkList) {
    VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
    for (VPRecipeBase &R : make_early_inc_range(*VPBB))
      R.moveBefore(*PredVPBB, PredVPBB->end());
    VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
    auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
    if (ParentRegion && ParentRegion->getExiting() == VPBB)
      ParentRegion->setExiting(PredVPBB);
    for (auto *Succ : to_vector(VPBB->successors())) {
      VPBlockUtils::disconnectBlocks(VPBB, Succ);
      VPBlockUtils::connectBlocks(PredVPBB, Succ);
    }
    // VPBB is now dead and will be cleaned up when the plan gets destroyed.
  }
  return !WorkList.empty();
}

void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
  // Convert masked VPReplicateRecipes to if-then region blocks.
  addReplicateRegions(Plan);

  bool ShouldSimplify = true;
  while (ShouldSimplify) {
    ShouldSimplify = sinkScalarOperands(Plan);
    ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
    ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
  }
}

/// Remove redundant casts of inductions.
///
/// Such redundant casts are casts of induction variables that can be ignored,
/// because we already proved that the casted phi is equal to the uncasted phi
/// in the vectorized loop. There is no need to vectorize the cast - the same
/// value can be used for both the phi and casts in the vector loop.
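///
/// For example (an illustrative IR sketch): given
///   %iv  = phi i32 [ 0, %ph ], [ %iv.next, %loop ]
///   %ext = sext i32 %iv to i64
/// where SCEV has already proven %ext equal to a 64-bit induction, users of
/// %ext can be rewired to the widened IV and the dead cast recipe is cleaned
/// up later.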
static void removeRedundantInductionCasts(VPlan &Plan) {
  for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (!IV || IV->getTruncInst())
      continue;

    // A sequence of IR Casts has potentially been recorded for IV, which
    // *must be bypassed* when the IV is vectorized, because the vectorized IV
    // will produce the desired casted value. This sequence forms a def-use
    // chain and is provided in reverse order, ending with the cast that uses
    // the IV phi. Search for the recipe of the last cast in the chain and
    // replace it with the original IV. Note that only the final cast is
    // expected to have users outside the cast-chain and the dead casts left
    // over will be cleaned up later.
    auto &Casts = IV->getInductionDescriptor().getCastInsts();
    VPValue *FindMyCast = IV;
    for (Instruction *IRCast : reverse(Casts)) {
      VPSingleDefRecipe *FoundUserCast = nullptr;
      for (auto *U : FindMyCast->users()) {
        auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
        if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
          FoundUserCast = UserCast;
          break;
        }
      }
      FindMyCast = FoundUserCast;
    }
    FindMyCast->replaceAllUsesWith(IV);
  }
}

/// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
/// recipe, if it exists.
static void removeRedundantCanonicalIVs(VPlan &Plan) {
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
  for (VPUser *U : CanonicalIV->users()) {
    WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
    if (WidenNewIV)
      break;
  }

  if (!WidenNewIV)
    return;

  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);

    if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
      continue;

    // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
    // everything WidenNewIV's users need. That is, WidenOriginalIV will
    // generate a vector phi or all users of WidenNewIV demand the first lane
    // only.
    if (any_of(WidenOriginalIV->users(),
               [WidenOriginalIV](VPUser *U) {
                 return !U->usesScalars(WidenOriginalIV);
               }) ||
        vputils::onlyFirstLaneUsed(WidenNewIV)) {
      WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
      WidenNewIV->eraseFromParent();
      return;
    }
  }
}

/// Returns true if \p R is dead and can be removed.
static bool isDeadRecipe(VPRecipeBase &R) {
  using namespace llvm::PatternMatch;
  // Do remove conditional assume instructions as their conditions may be
  // flattened.
  auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
  bool IsConditionalAssume =
      RepR && RepR->isPredicated() &&
      match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
  if (IsConditionalAssume)
    return true;

  if (R.mayHaveSideEffects())
    return false;

  // Recipe is dead if no user keeps the recipe alive.
  return all_of(R.definedValues(),
                [](VPValue *V) { return V->getNumUsers() == 0; });
}

void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());

  for (VPBasicBlock *VPBB :
       reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
    // The recipes in the block are processed in reverse order, to catch chains
    // of dead recipes.
    for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
      if (isDeadRecipe(R))
        R.eraseFromParent();
    }
  }
}

static VPScalarIVStepsRecipe *
createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
                    Instruction::BinaryOps InductionOpcode,
                    FPMathOperator *FPBinOp, Instruction *TruncI,
                    VPValue *StartV, VPValue *Step, DebugLoc DL,
                    VPBuilder &Builder) {
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
  VPSingleDefRecipe *BaseIV = Builder.createDerivedIV(
      Kind, FPBinOp, StartV, CanonicalIV, Step, "offset.idx");

  // Truncate base induction if needed.
  Type *CanonicalIVType = CanonicalIV->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
  if (TruncI) {
    Type *TruncTy = TruncI->getType();
    assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
    BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy, DL);
    ResultTy = TruncTy;
  }

  // Truncate step if needed.
  Type *StepTy = TypeInfo.inferScalarType(Step);
  if (ResultTy != StepTy) {
    assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
           "Not truncating.");
    assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
    auto *VecPreheader =
        cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
    VPBuilder::InsertPointGuard Guard(Builder);
    Builder.setInsertPoint(VecPreheader);
    Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy, DL);
  }
  return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step);
}

static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
  SetVector<VPUser *> Users(V->user_begin(), V->user_end());
  for (unsigned I = 0; I != Users.size(); ++I) {
    VPRecipeBase *Cur = cast<VPRecipeBase>(Users[I]);
    if (isa<VPHeaderPHIRecipe>(Cur))
      continue;
    for (VPValue *V : Cur->definedValues())
      Users.insert(V->user_begin(), V->user_end());
  }
  return Users.takeVector();
}

/// Legalize VPWidenPointerInductionRecipe, by replacing it with a PtrAdd
/// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
/// VPWidenPointerInductionRecipe will generate vectors only. If some users
/// require vectors while others require scalars, the scalar uses need to
/// extract the scalars from the generated vectors (note that this is
/// different from how int/fp inductions are handled). Legalize
/// extract-from-ends using uniform VPReplicateRecipe of wide inductions to
/// use regular VPReplicateRecipe, so the correct end value is available. Also
/// optimize VPWidenIntOrFpInductionRecipe, if any of its users needs scalar
/// values, by providing them scalar steps built on the canonical scalar IV
/// and updating the original IV's users. This is an optional optimization to
/// reduce the need for vector extracts.
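///
/// For example (an illustrative sketch), a pointer induction whose users only
/// need scalar addresses is rewritten as
///   %steps    = scalar-iv-steps 0, %Step
///   %next.gep = ptradd %IndStart, %steps
/// instead of generating a vector of pointers.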
static void legalizeAndOptimizeInductions(VPlan &Plan) {
  using namespace llvm::VPlanPatternMatch;
  VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
  bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
  VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
  for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
    auto *PhiR = dyn_cast<VPWidenInductionRecipe>(&Phi);
    if (!PhiR)
      continue;

    // Check if any uniform VPReplicateRecipes using the phi recipe are used by
    // ExtractFromEnd. Those must be replaced by a regular VPReplicateRecipe to
    // ensure the final value is available.
    // TODO: Remove once uniformity analysis is done on VPlan.
    for (VPUser *U : collectUsersRecursively(PhiR)) {
      auto *ExitIRI = dyn_cast<VPIRInstruction>(U);
      VPValue *Op;
      if (!ExitIRI || !match(ExitIRI->getOperand(0),
                             m_VPInstruction<VPInstruction::ExtractFromEnd>(
                                 m_VPValue(Op), m_VPValue())))
        continue;
      auto *RepR = dyn_cast<VPReplicateRecipe>(Op);
      if (!RepR || !RepR->isUniform())
        continue;
      assert(!RepR->isPredicated() && "RepR must not be predicated");
      Instruction *I = RepR->getUnderlyingInstr();
      auto *Clone =
          new VPReplicateRecipe(I, RepR->operands(), /*IsUniform*/ false);
      Clone->insertAfter(RepR);
      RepR->replaceAllUsesWith(Clone);
    }

    // Replace wide pointer inductions which have only their scalars used by
    // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
    if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
      if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
        continue;

      const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
      VPValue *StartV =
          Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
      VPValue *StepV = PtrIV->getOperand(1);
      VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
          Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
          nullptr, StartV, StepV, PtrIV->getDebugLoc(), Builder);

      VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
                                             PtrIV->getDebugLoc(), "next.gep");

      PtrIV->replaceAllUsesWith(PtrAdd);
      continue;
    }

    // Replace widened induction with scalar steps for users that only use
    // scalars.
    auto *WideIV = cast<VPWidenIntOrFpInductionRecipe>(&Phi);
    if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
          return U->usesScalars(WideIV);
        }))
      continue;

    const InductionDescriptor &ID = WideIV->getInductionDescriptor();
    VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
        Plan, ID.getKind(), ID.getInductionOpcode(),
        dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
        WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
        WideIV->getDebugLoc(), Builder);

    // Update scalar users of IV to use Step instead.
    if (!HasOnlyVectorVFs)
      WideIV->replaceAllUsesWith(Steps);
    else
      WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
        return U.usesScalars(WideIV);
      });
  }
}

/// Remove redundant VPExpandSCEVRecipes in \p Plan's entry block by replacing
/// them with already existing recipes expanding the same SCEV expression.
668 /// them with already existing recipes expanding the same SCEV expression.
669 static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
670   DenseMap<const SCEV *, VPValue *> SCEV2VPV;
671 
672   for (VPRecipeBase &R :
673        make_early_inc_range(*Plan.getEntry()->getEntryBasicBlock())) {
674     auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
675     if (!ExpR)
676       continue;
677 
678     auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
679     if (I.second)
680       continue;
681     ExpR->replaceAllUsesWith(I.first->second);
682     ExpR->eraseFromParent();
683   }
684 }
685 
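/// Erase the recipe defining \p V if it is dead, then transitively visit its
/// operands and erase any recipes that became dead as a result.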
static void recursivelyDeleteDeadRecipes(VPValue *V) {
  SmallVector<VPValue *> WorkList;
  SmallPtrSet<VPValue *, 8> Seen;
  WorkList.push_back(V);

  while (!WorkList.empty()) {
    VPValue *Cur = WorkList.pop_back_val();
    if (!Seen.insert(Cur).second)
      continue;
    VPRecipeBase *R = Cur->getDefiningRecipe();
    if (!R)
      continue;
    if (!isDeadRecipe(*R))
      continue;
    WorkList.append(R->op_begin(), R->op_end());
    R->eraseFromParent();
  }
}

/// Try to simplify recipe \p R.
static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
  using namespace llvm::VPlanPatternMatch;

  if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
    // Try to remove redundant blend recipes.
    SmallPtrSet<VPValue *, 4> UniqueValues;
    if (Blend->isNormalized() || !match(Blend->getMask(0), m_False()))
      UniqueValues.insert(Blend->getIncomingValue(0));
    for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
      if (!match(Blend->getMask(I), m_False()))
        UniqueValues.insert(Blend->getIncomingValue(I));

    if (UniqueValues.size() == 1) {
      Blend->replaceAllUsesWith(*UniqueValues.begin());
      Blend->eraseFromParent();
      return;
    }

    if (Blend->isNormalized())
      return;

    // Normalize the blend so its first incoming value is used as the initial
    // value with the others blended into it.

    unsigned StartIndex = 0;
    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      // If a value's mask is used only by the blend, then it can be removed
      // as dead code.
      // TODO: Find the most expensive mask that can be removed this way, or a
      // mask that's used by multiple blends where it can be removed from them
      // all.
      VPValue *Mask = Blend->getMask(I);
      if (Mask->getNumUsers() == 1 && !match(Mask, m_False())) {
        StartIndex = I;
        break;
      }
    }

    SmallVector<VPValue *, 4> OperandsWithMask;
    OperandsWithMask.push_back(Blend->getIncomingValue(StartIndex));

    for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
      if (I == StartIndex)
        continue;
      OperandsWithMask.push_back(Blend->getIncomingValue(I));
      OperandsWithMask.push_back(Blend->getMask(I));
    }

    auto *NewBlend = new VPBlendRecipe(
        cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask);
    NewBlend->insertBefore(&R);

    VPValue *DeadMask = Blend->getMask(StartIndex);
    Blend->replaceAllUsesWith(NewBlend);
    Blend->eraseFromParent();
    recursivelyDeleteDeadRecipes(DeadMask);
    return;
  }

  VPValue *A;
  if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
    VPValue *Trunc = R.getVPSingleValue();
    Type *TruncTy = TypeInfo.inferScalarType(Trunc);
    Type *ATy = TypeInfo.inferScalarType(A);
    if (TruncTy == ATy) {
      Trunc->replaceAllUsesWith(A);
    } else {
      // Don't replace a scalarizing recipe with a widened cast.
      if (isa<VPReplicateRecipe>(&R))
        return;
      if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {

        unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
                                 ? Instruction::SExt
                                 : Instruction::ZExt;
        auto *VPC =
            new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
        if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
          // UnderlyingExt has distinct return type, used to retain legacy cost.
          VPC->setUnderlyingValue(UnderlyingExt);
        }
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
        auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
        VPC->insertBefore(&R);
        Trunc->replaceAllUsesWith(VPC);
      }
    }
#ifndef NDEBUG
    // Verify that the cached type info for both A and its users is still
    // accurate by comparing it to freshly computed types.
    VPTypeAnalysis TypeInfo2(
        R.getParent()->getPlan()->getCanonicalIV()->getScalarType());
    assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
    for (VPUser *U : A->users()) {
      auto *R = cast<VPRecipeBase>(U);
      for (VPValue *VPV : R->definedValues())
        assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
    }
#endif
  }

  // Simplify (X && Y) || (X && !Y) -> X.
  // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
  // && (Y || Z) and (X || !X) into true. This requires queuing newly created
  // recipes to be visited during simplification.
  VPValue *X, *Y, *X1, *Y1;
  if (match(&R,
            m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
                         m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
      X == X1 && Y == Y1) {
    R.getVPSingleValue()->replaceAllUsesWith(X);
    R.eraseFromParent();
    return;
  }

  if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  if (match(&R, m_Not(m_Not(m_VPValue(A)))))
    return R.getVPSingleValue()->replaceAllUsesWith(A);

  // Remove redundant DerivedIVs, that is 0 + A * 1 -> A and 0 + 0 * x -> 0.
  if ((match(&R,
             m_DerivedIV(m_SpecificInt(0), m_VPValue(A), m_SpecificInt(1))) ||
       match(&R,
             m_DerivedIV(m_SpecificInt(0), m_SpecificInt(0), m_VPValue()))) &&
      TypeInfo.inferScalarType(R.getOperand(1)) ==
          TypeInfo.inferScalarType(R.getVPSingleValue()))
    return R.getVPSingleValue()->replaceAllUsesWith(R.getOperand(1));
}

/// Try to simplify the recipes in \p Plan. Use \p CanonicalIVTy as type for all
/// un-typed live-ins in VPTypeAnalysis.
static void simplifyRecipes(VPlan &Plan, Type *CanonicalIVTy) {
  ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
      Plan.getEntry());
  VPTypeAnalysis TypeInfo(CanonicalIVTy);
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      simplifyRecipe(R, TypeInfo);
    }
  }
}

void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
                                         unsigned BestUF,
                                         PredicatedScalarEvolution &PSE) {
  assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
  assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
  VPRegionBlock *VectorRegion = Plan.getVectorLoopRegion();
  VPBasicBlock *ExitingVPBB = VectorRegion->getExitingBasicBlock();
  auto *Term = &ExitingVPBB->back();
  // Try to simplify the branch condition if TC <= VF * UF when preparing to
  // execute the plan for the main vector loop. We only do this if the
  // terminator is:
  //  1. BranchOnCount, or
  //  2. BranchOnCond where the input is Not(ActiveLaneMask).
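  //
  // For example (illustrative): with a trip count of 8, VF = 8 and UF = 1,
  // the latch condition is necessarily true after the first iteration, so
  // the region either gets dissolved or its terminator is replaced by
  // BranchOnCond true below.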
  using namespace llvm::VPlanPatternMatch;
  if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
      !match(Term,
             m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
    return;

  ScalarEvolution &SE = *PSE.getSE();
  const SCEV *TripCount =
      vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
  assert(!isa<SCEVCouldNotCompute>(TripCount) &&
         "Trip count SCEV must be computable");
  ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
  const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
  if (TripCount->isZero() ||
      !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
    return;

  // The vector loop region only executes once. If possible, completely remove
  // the region, otherwise replace the terminator controlling the latch with
  // (BranchOnCond true).
  auto *Header = cast<VPBasicBlock>(VectorRegion->getEntry());
  auto *CanIVTy = Plan.getCanonicalIV()->getScalarType();
  if (all_of(
          Header->phis(),
          IsaPred<VPCanonicalIVPHIRecipe, VPFirstOrderRecurrencePHIRecipe>)) {
    for (VPRecipeBase &HeaderR : make_early_inc_range(Header->phis())) {
      auto *HeaderPhiR = cast<VPHeaderPHIRecipe>(&HeaderR);
      HeaderPhiR->replaceAllUsesWith(HeaderPhiR->getStartValue());
      HeaderPhiR->eraseFromParent();
    }

    VPBlockBase *Preheader = VectorRegion->getSinglePredecessor();
    VPBlockBase *Exit = VectorRegion->getSingleSuccessor();
    VPBlockUtils::disconnectBlocks(Preheader, VectorRegion);
    VPBlockUtils::disconnectBlocks(VectorRegion, Exit);

    for (VPBlockBase *B : vp_depth_first_shallow(VectorRegion->getEntry()))
      B->setParent(nullptr);

    VPBlockUtils::connectBlocks(Preheader, Header);
    VPBlockUtils::connectBlocks(ExitingVPBB, Exit);
    simplifyRecipes(Plan, CanIVTy);
  } else {
    // The vector region contains header phis for which we cannot remove the
    // loop region yet.
    LLVMContext &Ctx = SE.getContext();
    auto *BOC = new VPInstruction(
        VPInstruction::BranchOnCond,
        {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))}, Term->getDebugLoc());
    ExitingVPBB->appendRecipe(BOC);
  }

  Term->eraseFromParent();
  VPlanTransforms::removeDeadRecipes(Plan);

  Plan.setVF(BestVF);
  Plan.setUF(BestUF);
  // TODO: Further simplifications are possible
  //      1. Replace inductions with constants.
  //      2. Replace vector loop region with VPBasicBlock.
}

/// Sink users of \p FOR after the recipe defining the previous value \p
/// Previous of the recurrence. \returns true if all users of \p FOR could be
/// re-arranged as needed or false if it is not possible.
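///
/// For example (an illustrative sketch of a fixed-order recurrence):
///   %for  = phi [ %init, %ph ], [ %prev, %loop ]
///   %use  = add %for, 1   ; user of the recurrence
///   %prev = load ...      ; Previous, defining the backedge value
/// Here %use must be sunk after %prev, so the splice of %for and %prev can be
/// inserted before all users of %for.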
static bool
sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
                                 VPRecipeBase *Previous,
                                 VPDominatorTree &VPDT) {
  // Collect recipes that need sinking.
  SmallVector<VPRecipeBase *> WorkList;
  SmallPtrSet<VPRecipeBase *, 8> Seen;
  Seen.insert(Previous);
  auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
    // The previous value must not depend on the users of the recurrence phi;
    // if it did, FOR would not be a fixed-order recurrence.
    if (SinkCandidate == Previous)
      return false;

    if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
        !Seen.insert(SinkCandidate).second ||
        VPDT.properlyDominates(Previous, SinkCandidate))
      return true;

    if (SinkCandidate->mayHaveSideEffects())
      return false;

    WorkList.push_back(SinkCandidate);
    return true;
  };

  // Recursively sink users of FOR after Previous.
  WorkList.push_back(FOR);
  for (unsigned I = 0; I != WorkList.size(); ++I) {
    VPRecipeBase *Current = WorkList[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");

    for (VPUser *User : Current->getVPSingleValue()->users()) {
      if (!TryToPushSinkCandidate(cast<VPRecipeBase>(User)))
        return false;
    }
  }

  // Keep recipes to sink ordered by dominance so earlier instructions are
  // processed first.
  sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *SinkCandidate : WorkList) {
    if (SinkCandidate == FOR)
      continue;

    SinkCandidate->moveAfter(Previous);
    Previous = SinkCandidate;
  }
  return true;
}

/// Try to hoist \p Previous and its operands before all users of \p FOR.
static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
                                        VPRecipeBase *Previous,
                                        VPDominatorTree &VPDT) {
  if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
    return false;

  // Collect recipes that need hoisting.
  SmallVector<VPRecipeBase *> HoistCandidates;
  SmallPtrSet<VPRecipeBase *, 8> Visited;
  VPRecipeBase *HoistPoint = nullptr;
  // Find the closest hoist point by looking at all users of FOR and selecting
  // the recipe dominating all other users.
  for (VPUser *U : FOR->users()) {
    auto *R = cast<VPRecipeBase>(U);
    if (!HoistPoint || VPDT.properlyDominates(R, HoistPoint))
      HoistPoint = R;
  }
  assert(all_of(FOR->users(),
                [&VPDT, HoistPoint](VPUser *U) {
                  auto *R = cast<VPRecipeBase>(U);
                  return HoistPoint == R ||
                         VPDT.properlyDominates(HoistPoint, R);
                }) &&
         "HoistPoint must dominate all users of FOR");

  auto NeedsHoisting = [HoistPoint, &VPDT,
                        &Visited](VPValue *HoistCandidateV) -> VPRecipeBase * {
    VPRecipeBase *HoistCandidate = HoistCandidateV->getDefiningRecipe();
    if (!HoistCandidate)
      return nullptr;
    VPRegionBlock *EnclosingLoopRegion =
        HoistCandidate->getParent()->getEnclosingLoopRegion();
    assert((!HoistCandidate->getParent()->getParent() ||
            HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
           "CFG in VPlan should still be flat, without replicate regions");
    // Hoist candidate was already visited, no need to hoist.
    if (!Visited.insert(HoistCandidate).second)
      return nullptr;

    // The candidate is outside the loop region or is a header phi; it
    // dominates FOR's users without hoisting.
    if (!EnclosingLoopRegion || isa<VPHeaderPHIRecipe>(HoistCandidate))
      return nullptr;

    // If we reached a recipe that dominates HoistPoint, we don't need to
    // hoist the recipe.
    if (VPDT.properlyDominates(HoistCandidate, HoistPoint))
      return nullptr;
    return HoistCandidate;
  };
  auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
    // Avoid hoisting candidates with side-effects, as we do not yet analyze
    // associated dependencies.
    return !HoistCandidate->mayHaveSideEffects();
  };

  if (!NeedsHoisting(Previous->getVPSingleValue()))
    return true;

  // Recursively try to hoist Previous and its operands before all users of FOR.
  HoistCandidates.push_back(Previous);

  for (unsigned I = 0; I != HoistCandidates.size(); ++I) {
    VPRecipeBase *Current = HoistCandidates[I];
    assert(Current->getNumDefinedValues() == 1 &&
           "only recipes with a single defined value expected");
    if (!CanHoist(Current))
      return false;

    for (VPValue *Op : Current->operands()) {
      // If we reach FOR, it means the original Previous depends on some other
      // recurrence that in turn depends on FOR. If that is the case, we would
      // also need to hoist recipes involving the other FOR, which may break
      // dependencies.
      if (Op == FOR)
        return false;

      if (auto *R = NeedsHoisting(Op))
        HoistCandidates.push_back(R);
    }
  }

  // Order recipes to hoist by dominance so earlier instructions are processed
  // first.
  sort(HoistCandidates, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
    return VPDT.properlyDominates(A, B);
  });

  for (VPRecipeBase *HoistCandidate : HoistCandidates) {
    HoistCandidate->moveBefore(*HoistPoint->getParent(),
                               HoistPoint->getIterator());
  }

  return true;
}

bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
                                                  VPBuilder &LoopBuilder) {
  VPDominatorTree VPDT;
  VPDT.recalculate(Plan);

  SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntry()->getEntryBasicBlock()->phis())
    if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
      RecurrencePhis.push_back(FOR);

  for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
    SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
    VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
    // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
    // to terminate.
    while (auto *PrevPhi =
               dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
      assert(PrevPhi->getParent() == FOR->getParent());
      assert(SeenPhis.insert(PrevPhi).second);
      Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
    }

    if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT) &&
        !hoistPreviousBeforeFORUsers(FOR, Previous, VPDT))
      return false;

    // Introduce a recipe to combine the incoming and previous values of a
    // fixed-order recurrence.
    VPBasicBlock *InsertBlock = Previous->getParent();
    if (isa<VPHeaderPHIRecipe>(Previous))
      LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
    else
      LoopBuilder.setInsertPoint(InsertBlock,
                                 std::next(Previous->getIterator()));

    auto *RecurSplice = cast<VPInstruction>(
        LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
                                 {FOR, FOR->getBackedgeValue()}));

    FOR->replaceAllUsesWith(RecurSplice);
    // Set the first operand of RecurSplice to FOR again, after replacing
    // all users.
    RecurSplice->setOperand(0, FOR);
  }
  return true;
}

void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
  for (VPRecipeBase &R :
       Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
    auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
    if (!PhiR)
      continue;
    const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
    RecurKind RK = RdxDesc.getRecurrenceKind();
    if (RK != RecurKind::Add && RK != RecurKind::Mul)
      continue;

    for (VPUser *U : collectUsersRecursively(PhiR))
      if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
        RecWithFlags->dropPoisonGeneratingFlags();
      }
  }
}

/// Move loop-invariant recipes out of the vector loop region in \p Plan.
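/// For example (illustrative): a recipe computing %a * %b, where both
/// operands are defined outside the region, is moved to the preheader and
/// computed once instead of on every vector iteration.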
static void licm(VPlan &Plan) {
  VPBasicBlock *Preheader = Plan.getVectorPreheader();

  // Return true if we do not know how to (mechanically) hoist a given recipe
  // out of a loop region. Does not address legality concerns such as aliasing
  // or speculation safety.
  auto CannotHoistRecipe = [](VPRecipeBase &R) {
    // Allocas cannot be hoisted.
    auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
    return RepR && RepR->getOpcode() == Instruction::Alloca;
  };

  // Hoist any loop-invariant recipes from the vector loop region to the
  // preheader. Perform a shallow traversal of the vector loop region, to
  // exclude recipes in replicate regions.
  VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_shallow(LoopRegion->getEntry()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (CannotHoistRecipe(R))
        continue;
      // TODO: Relax checks in the future, e.g. we could also hoist reads, if
      // their memory location is not modified in the vector loop.
      if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi() ||
          any_of(R.operands(), [](VPValue *Op) {
            return !Op->isDefinedOutsideLoopRegions();
          }))
        continue;
      R.moveBefore(*Preheader, Preheader->end());
    }
  }
}

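/// Shrink recipes to the bit widths recorded in \p MinBWs. For example
/// (illustrative): if MinBWs records that a 32-bit add only needs 8 bits, its
/// operands are truncated to i8 and its result is zero-extended back to i32
/// for users that still expect the original width.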
void VPlanTransforms::truncateToMinimalBitwidths(
    VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs) {
#ifndef NDEBUG
  // Count the processed recipes and cross check the count later with MinBWs
  // size, to make sure all entries in MinBWs have been handled.
  unsigned NumProcessedRecipes = 0;
#endif
  // Keep track of created truncates, so they can be re-used. Note that we
  // cannot use RAUW after creating a new truncate, as this could make other
  // uses have different types for their operands, making them invalidly
  // typed.
  DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
  Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
  VPTypeAnalysis TypeInfo(CanonicalIVType);
  VPBasicBlock *PH = Plan.getVectorPreheader();
  for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
           vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
    for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
      if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
               VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
        continue;

      VPValue *ResultVPV = R.getVPSingleValue();
      auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
      unsigned NewResSizeInBits = MinBWs.lookup(UI);
      if (!NewResSizeInBits)
        continue;

#ifndef NDEBUG
      NumProcessedRecipes++;
#endif
      // If the value wasn't vectorized, we must maintain the original scalar
      // type. Skip those here, after incrementing NumProcessedRecipes. Also
      // skip casts which do not need to be handled explicitly here, as
      // redundant casts will be removed during recipe simplification.
      if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
#ifndef NDEBUG
        // If any of the operands is a live-in and not used by VPWidenRecipe or
        // VPWidenSelectRecipe, but is in MinBWs, make sure it is counted as
        // processed as well. When MinBWs is constructed, there is no
        // information about whether recipes are widened or replicated, and in
        // case they are replicated the operands are not truncated. Counting
        // them here ensures we do not miss any recipes in MinBWs.
1223         // TODO: Remove once the analysis is done on VPlan.
1224         for (VPValue *Op : R.operands()) {
1225           if (!Op->isLiveIn())
1226             continue;
1227           auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
1228           if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
1229               none_of(Op->users(),
1230                       IsaPred<VPWidenRecipe, VPWidenSelectRecipe>)) {
1231             // Add an entry to ProcessedTruncs to avoid counting the same
1232             // operand multiple times.
1233             ProcessedTruncs[Op] = nullptr;
1234             NumProcessedRecipes += 1;
1235           }
1236         }
1237 #endif
1238         continue;
1239       }
1240 
1241       Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
1242       unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
1243       assert(OldResTy->isIntegerTy() && "only integer types supported");
1244       (void)OldResSizeInBits;
1245 
1246       LLVMContext &Ctx = CanonicalIVType->getContext();
1247       auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);
1248 
1249       // Any wrapping introduced by shrinking this operation shouldn't be
1250       // considered undefined behavior. So, we can't unconditionally copy
1251       // arithmetic wrapping flags to VPW.
1252       if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
1253         VPW->dropPoisonGeneratingFlags();
1254 
1255       using namespace llvm::VPlanPatternMatch;
1256       if (OldResSizeInBits != NewResSizeInBits &&
1257           !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
1258         // Extend result to original width.
1259         auto *Ext =
1260             new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
1261         Ext->insertAfter(&R);
1262         ResultVPV->replaceAllUsesWith(Ext);
1263         Ext->setOperand(0, ResultVPV);
1264         assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
1265       } else {
1266         assert(
1267             match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
1268             "Only ICmps should not need extending the result.");
1269       }
1270 
1271       assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
1272       if (isa<VPWidenLoadRecipe>(&R))
1273         continue;
1274 
1275       // Shrink operands by introducing truncates as needed.
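           // Illustrative sketch (types hypothetical): narrowing an i32 add to
           // i8 produces, together with the extend inserted above,
           //   WIDEN-CAST %x.t = trunc %x to i8
           //   WIDEN-CAST %y.t = trunc %y to i8
           //   WIDEN %res = add %x.t, %y.t
           //   WIDEN-CAST %res.ext = zext %res to i32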
1276       unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
1277       for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
1278         auto *Op = R.getOperand(Idx);
1279         unsigned OpSizeInBits =
1280             TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
1281         if (OpSizeInBits == NewResSizeInBits)
1282           continue;
1283         assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
1284         auto [ProcessedIter, IterIsEmpty] =
1285             ProcessedTruncs.insert({Op, nullptr});
1286         VPWidenCastRecipe *NewOp =
1287             IterIsEmpty
1288                 ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
1289                 : ProcessedIter->second;
1290         R.setOperand(Idx, NewOp);
1291         if (!IterIsEmpty)
1292           continue;
1293         ProcessedIter->second = NewOp;
1294         if (!Op->isLiveIn()) {
1295           NewOp->insertBefore(&R);
1296         } else {
1297           PH->appendRecipe(NewOp);
1298 #ifndef NDEBUG
1299           auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
1300           bool IsContained = MinBWs.contains(OpInst);
1301           NumProcessedRecipes += IsContained;
1302 #endif
1303         }
1304       }
1305 
1306     }
1307   }
1308 
1309   assert(MinBWs.size() == NumProcessedRecipes &&
1310          "some entries in MinBWs haven't been processed");
1311 }
1312 
1313 void VPlanTransforms::optimize(VPlan &Plan) {
1314   removeRedundantCanonicalIVs(Plan);
1315   removeRedundantInductionCasts(Plan);
1316 
1317   simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
1318   legalizeAndOptimizeInductions(Plan);
1319   removeRedundantExpandSCEVRecipes(Plan);
1320   simplifyRecipes(Plan, Plan.getCanonicalIV()->getScalarType());
1321   removeDeadRecipes(Plan);
1322 
1323   createAndOptimizeReplicateRegions(Plan);
1324   mergeBlocksIntoPredecessors(Plan);
1325   licm(Plan);
1326 }
1327 
1328 // Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
1329 // the loop terminator with a branch-on-cond recipe with the negated
1330 // active-lane-mask as operand. Note that this turns the loop into an
1331 // uncountable one. Only the existing terminator is replaced, all other existing
1332 // recipes/users remain unchanged, except for poison-generating flags being
1333 // dropped from the canonical IV increment. Return the created
1334 // VPActiveLaneMaskPHIRecipe.
1335 //
1336 // The function uses the following definitions:
1337 //
1338 //  %TripCount = DataWithControlFlowWithoutRuntimeCheck ?
1339 //    calculate-trip-count-minus-VF (original TC) : original TC
1340 //  %IncrementValue = DataWithControlFlowWithoutRuntimeCheck ?
1341 //     CanonicalIVPhi : CanonicalIVIncrement
1342 //  %StartV is the canonical induction start value.
1343 //
1344 // The function adds the following recipes:
1345 //
1346 // vector.ph:
1347 //   %TripCount = calculate-trip-count-minus-VF (original TC)
1348 //       [if DataWithControlFlowWithoutRuntimeCheck]
1349 //   %EntryInc = canonical-iv-increment-for-part %StartV
1350 //   %EntryALM = active-lane-mask %EntryInc, %TripCount
1351 //
1352 // vector.body:
1353 //   ...
1354 //   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
1355 //   ...
1356 //   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
1357 //   %ALM = active-lane-mask %InLoopInc, %TripCount
1358 //   %Negated = Not %ALM
1359 //   branch-on-cond %Negated
1360 //
1361 static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
1362     VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
1363   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
1364   VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
1365   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1366   VPValue *StartV = CanonicalIVPHI->getStartValue();
1367 
1368   auto *CanonicalIVIncrement =
1369       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1370   // TODO: Check if dropping the flags is needed if
1371   // !DataAndControlFlowWithoutRuntimeCheck.
1372   CanonicalIVIncrement->dropPoisonGeneratingFlags();
1373   DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
1374   // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
1375   // we have to take unrolling into account. Each part needs to start at
1376   //   Part * VF
1377   auto *VecPreheader = Plan.getVectorPreheader();
1378   VPBuilder Builder(VecPreheader);
1379 
1380   // Create the ActiveLaneMask instruction using the correct start values.
1381   VPValue *TC = Plan.getTripCount();
1382 
1383   VPValue *TripCount, *IncrementValue;
1384   if (!DataAndControlFlowWithoutRuntimeCheck) {
1385     // When the loop is guarded by a runtime overflow check for the loop
1386     // induction variable increment by VF, we can increment the value before
1387     // the get.active.lane.mask intrinsic and use the unmodified trip count.
1388     IncrementValue = CanonicalIVIncrement;
1389     TripCount = TC;
1390   } else {
1391     // When avoiding a runtime check, the active.lane.mask inside the loop
1392     // uses a modified trip count and the induction variable increment is
1393     // done after the active.lane.mask intrinsic is called.
1394     IncrementValue = CanonicalIVPHI;
1395     TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
1396                                      {TC}, DL);
1397   }
1398   auto *EntryIncrement = Builder.createOverflowingOp(
1399       VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
1400       "index.part.next");
1401 
1402   // Create the active lane mask instruction in the VPlan preheader.
1403   auto *EntryALM =
1404       Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
1405                            DL, "active.lane.mask.entry");
1406 
1407   // Now create the ActiveLaneMaskPhi recipe in the main loop using the
1408   // preheader ActiveLaneMask instruction.
1409   auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
1410   LaneMaskPhi->insertAfter(CanonicalIVPHI);
1411 
1412   // Create the active lane mask for the next iteration of the loop before the
1413   // original terminator.
1414   VPRecipeBase *OriginalTerminator = EB->getTerminator();
1415   Builder.setInsertPoint(OriginalTerminator);
1416   auto *InLoopIncrement =
1417       Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
1418                                   {IncrementValue}, {false, false}, DL);
1419   auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
1420                                    {InLoopIncrement, TripCount}, DL,
1421                                    "active.lane.mask.next");
1422   LaneMaskPhi->addOperand(ALM);
1423 
1424   // Replace the original terminator with BranchOnCond. We have to invert the
1425   // mask here because a true condition means jumping to the exit block.
1426   auto *NotMask = Builder.createNot(ALM, DL);
1427   Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
1428   OriginalTerminator->eraseFromParent();
1429   return LaneMaskPhi;
1430 }
1431 
1432 /// Collect all VPValues representing a header mask through the (ICMP_ULE,
1433 /// WideCanonicalIV, backedge-taken-count) pattern.
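     /// E.g. (illustrative VPlan spelling), a header mask looks like:
     ///   %wide.iv = WIDEN-CANONICAL-INDUCTION %canonical.iv
     ///   %mask = icmp ule %wide.iv, %backedge.taken.count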
1434 /// TODO: Introduce explicit recipe for header-mask instead of searching
1435 /// for the header-mask pattern manually.
1436 static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
1437   SmallVector<VPValue *> WideCanonicalIVs;
1438   auto *FoundWidenCanonicalIVUser =
1439       find_if(Plan.getCanonicalIV()->users(),
1440               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1441   assert(count_if(Plan.getCanonicalIV()->users(),
1442                   [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
1443              1 &&
1444          "Must have at most one VPWidenCanonicalIVRecipe");
1445   if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
1446     auto *WideCanonicalIV =
1447         cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1448     WideCanonicalIVs.push_back(WideCanonicalIV);
1449   }
1450 
1451   // Also include VPWidenIntOrFpInductionRecipes that represent a widened
1452   // version of the canonical induction.
1453   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1454   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
1455     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
1456     if (WidenOriginalIV && WidenOriginalIV->isCanonical())
1457       WideCanonicalIVs.push_back(WidenOriginalIV);
1458   }
1459 
1460   // Walk users of wide canonical IVs and collect all compares of the form
1461   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
1462   SmallVector<VPValue *> HeaderMasks;
1463   for (auto *Wide : WideCanonicalIVs) {
1464     for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
1465       auto *HeaderMask = dyn_cast<VPInstruction>(U);
1466       if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
1467         continue;
1468 
1469       assert(HeaderMask->getOperand(0) == Wide &&
1470              "WidenCanonicalIV must be the first operand of the compare");
1471       HeaderMasks.push_back(HeaderMask);
1472     }
1473   }
1474   return HeaderMasks;
1475 }
1476 
1477 void VPlanTransforms::addActiveLaneMask(
1478     VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
1479     bool DataAndControlFlowWithoutRuntimeCheck) {
1480   assert((!DataAndControlFlowWithoutRuntimeCheck ||
1481           UseActiveLaneMaskForControlFlow) &&
1482          "DataAndControlFlowWithoutRuntimeCheck implies "
1483          "UseActiveLaneMaskForControlFlow");
1484 
1485   auto *FoundWidenCanonicalIVUser =
1486       find_if(Plan.getCanonicalIV()->users(),
1487               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1488   assert(FoundWidenCanonicalIVUser &&
1489          "Must have widened canonical IV when tail folding!");
1490   auto *WideCanonicalIV =
1491       cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1492   VPSingleDefRecipe *LaneMask;
1493   if (UseActiveLaneMaskForControlFlow) {
1494     LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
1495         Plan, DataAndControlFlowWithoutRuntimeCheck);
1496   } else {
1497     VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
1498     LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
1499                               {WideCanonicalIV, Plan.getTripCount()}, nullptr,
1500                               "active.lane.mask");
1501   }
1502 
1503   // Walk users of WideCanonicalIV and replace all compares of the form
1504   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
1505   // active-lane-mask.
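       // E.g. (illustrative), a header mask
       //   %m = icmp ule %wide.canonical.iv, %backedge.taken.count
       // is replaced by
       //   %m = active-lane-mask %wide.canonical.iv, %trip.count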
1506   for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
1507     HeaderMask->replaceAllUsesWith(LaneMask);
1508 }
1509 
1510 /// Try to convert \p CurRecipe to a corresponding EVL-based recipe. Returns
1511 /// nullptr if no EVL-based recipe could be created.
1512 /// \p HeaderMask  Header Mask.
1513 /// \p CurRecipe   Recipe to be transformed.
1514 /// \p TypeInfo    VPlan-based type analysis.
1515 /// \p AllOneMask  The vector mask parameter of vector-predication intrinsics.
1516 /// \p EVL         The explicit vector length parameter of vector-predication
1517 /// intrinsics.
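     /// For example (illustrative), a VPWidenLoadRecipe masked only by the
     /// header mask,
     ///   WIDEN %l = load %addr, mask = %header.mask
     /// may become a VPWidenLoadEVLRecipe without a mask, since the header
     /// mask is subsumed by the EVL operand:
     ///   WIDEN %l = vp.load %addr, all-true, %evl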
1518 static VPRecipeBase *createEVLRecipe(VPValue *HeaderMask,
1519                                      VPRecipeBase &CurRecipe,
1520                                      VPTypeAnalysis &TypeInfo,
1521                                      VPValue &AllOneMask, VPValue &EVL) {
1522   using namespace llvm::VPlanPatternMatch;
1523   auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
1524     assert(OrigMask && "Unmasked recipe when folding tail");
1525     return HeaderMask == OrigMask ? nullptr : OrigMask;
1526   };
1527 
1528   return TypeSwitch<VPRecipeBase *, VPRecipeBase *>(&CurRecipe)
1529       .Case<VPWidenLoadRecipe>([&](VPWidenLoadRecipe *L) {
1530         VPValue *NewMask = GetNewMask(L->getMask());
1531         return new VPWidenLoadEVLRecipe(*L, EVL, NewMask);
1532       })
1533       .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
1534         VPValue *NewMask = GetNewMask(S->getMask());
1535         return new VPWidenStoreEVLRecipe(*S, EVL, NewMask);
1536       })
1537       .Case<VPWidenRecipe>([&](VPWidenRecipe *W) -> VPRecipeBase * {
1538         unsigned Opcode = W->getOpcode();
1539         if (!Instruction::isBinaryOp(Opcode) && !Instruction::isUnaryOp(Opcode))
1540           return nullptr;
1541         return new VPWidenEVLRecipe(*W, EVL);
1542       })
1543       .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
1544         VPValue *NewMask = GetNewMask(Red->getCondOp());
1545         return new VPReductionEVLRecipe(*Red, EVL, NewMask);
1546       })
1547       .Case<VPWidenIntrinsicRecipe, VPWidenCastRecipe>(
1548           [&](auto *CR) -> VPRecipeBase * {
1549             Intrinsic::ID VPID;
1550             if (auto *CallR = dyn_cast<VPWidenIntrinsicRecipe>(CR)) {
1551               VPID =
1552                   VPIntrinsic::getForIntrinsic(CallR->getVectorIntrinsicID());
1553             } else {
1554               auto *CastR = cast<VPWidenCastRecipe>(CR);
1555               VPID = VPIntrinsic::getForOpcode(CastR->getOpcode());
1556             }
1557 
1558             // Not all intrinsics have a corresponding VP intrinsic.
1559             if (VPID == Intrinsic::not_intrinsic)
1560               return nullptr;
1561             assert(VPIntrinsic::getMaskParamPos(VPID) &&
1562                    VPIntrinsic::getVectorLengthParamPos(VPID) &&
1563                    "Expected VP intrinsic to have mask and EVL");
1564 
1565             SmallVector<VPValue *> Ops(CR->operands());
1566             Ops.push_back(&AllOneMask);
1567             Ops.push_back(&EVL);
1568             return new VPWidenIntrinsicRecipe(
1569                 VPID, Ops, TypeInfo.inferScalarType(CR), CR->getDebugLoc());
1570           })
1571       .Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
1572         SmallVector<VPValue *> Ops(Sel->operands());
1573         Ops.push_back(&EVL);
1574         return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
1575                                           TypeInfo.inferScalarType(Sel),
1576                                           Sel->getDebugLoc());
1577       })
1578       .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
1579         VPValue *LHS, *RHS;
1580         // Transform select with a header mask condition
1581         //   select(header_mask, LHS, RHS)
1582         // into vector predication merge.
1583         //   vp.merge(all-true, LHS, RHS, EVL)
1584         if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
1585                                  m_VPValue(RHS))))
1586           return nullptr;
1587         // Use all true as the condition because this transformation is
1588         // limited to selects whose condition is a header mask.
1589         return new VPWidenIntrinsicRecipe(
1590             Intrinsic::vp_merge, {&AllOneMask, LHS, RHS, &EVL},
1591             TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
1592       })
1593       .Default([&](VPRecipeBase *R) { return nullptr; });
1594 }
1595 
1596 /// Replace recipes with their EVL variants.
1597 static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
1598   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1599   VPTypeAnalysis TypeInfo(CanonicalIVType);
1600   LLVMContext &Ctx = CanonicalIVType->getContext();
1601   VPValue *AllOneMask = Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));
1602 
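       // VPReverseVectorPointerRecipe takes VF as its second operand; with EVL
       // tail folding only EVL lanes are active, so rewrite it to use the EVL
       // instead.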
1603   for (VPUser *U : Plan.getVF().users()) {
1604     if (auto *R = dyn_cast<VPReverseVectorPointerRecipe>(U))
1605       R->setOperand(1, &EVL);
1606   }
1607 
1608   SmallVector<VPRecipeBase *> ToErase;
1609 
1610   for (VPValue *HeaderMask : collectAllHeaderMasks(Plan)) {
1611     for (VPUser *U : collectUsersRecursively(HeaderMask)) {
1612       auto *CurRecipe = cast<VPRecipeBase>(U);
1613       VPRecipeBase *EVLRecipe =
1614           createEVLRecipe(HeaderMask, *CurRecipe, TypeInfo, *AllOneMask, EVL);
1615       if (!EVLRecipe)
1616         continue;
1617 
1618       [[maybe_unused]] unsigned NumDefVal = EVLRecipe->getNumDefinedValues();
1619       assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
1620              "New recipe must define the same number of values as the "
1621              "original.");
1622       assert(
1623           NumDefVal <= 1 &&
1624           "Only supports recipes with a single definition or without users.");
1625       EVLRecipe->insertBefore(CurRecipe);
1626       if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(EVLRecipe)) {
1627         VPValue *CurVPV = CurRecipe->getVPSingleValue();
1628         CurVPV->replaceAllUsesWith(EVLRecipe->getVPSingleValue());
1629       }
1630       // Defer erasing recipes till the end so that we don't invalidate the
1631       // VPTypeAnalysis cache.
1632       ToErase.push_back(CurRecipe);
1633     }
1634   }
1635 
1636   for (VPRecipeBase *R : reverse(ToErase)) {
1637     SmallVector<VPValue *> PossiblyDead(R->operands());
1638     R->eraseFromParent();
1639     for (VPValue *Op : PossiblyDead)
1640       recursivelyDeleteDeadRecipes(Op);
1641   }
1642 }
1643 
1644 /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and
1645 /// replace all uses except the canonical IV increment of
1646 /// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe
1647 /// is used only for counting loop iterations after this transformation.
1648 ///
1649 /// The function uses the following definitions:
1650 ///  %StartV is the canonical induction start value.
1651 ///
1652 /// The function adds the following recipes:
1653 ///
1654 /// vector.ph:
1655 /// ...
1656 ///
1657 /// vector.body:
1658 /// ...
1659 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1660 ///                                               [ %NextEVLIV, %vector.body ]
1661 /// %AVL = sub original TC, %EVLPhi
1662 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
1663 /// ...
1664 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1665 /// ...
1666 ///
1667 /// If MaxSafeElements is provided, the function adds the following recipes:
1668 /// vector.ph:
1669 /// ...
1670 ///
1671 /// vector.body:
1672 /// ...
1673 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1674 ///                                               [ %NextEVLIV, %vector.body ]
1675 /// %AVL = sub original TC, %EVLPhi
1676 /// %cmp = cmp ult %AVL, MaxSafeElements
1677 /// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
1678 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
1679 /// ...
1680 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1681 /// ...
1682 ///
1683 bool VPlanTransforms::tryAddExplicitVectorLength(
1684     VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
1685   VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1686   // The transform updates all users of inductions to work based on EVL, instead
1687   // of the VF directly. At the moment, widened inductions cannot be updated, so
1688   // bail out if the plan contains any.
1689   bool ContainsWidenInductions = any_of(
1690       Header->phis(),
1691       IsaPred<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>);
1692   if (ContainsWidenInductions)
1693     return false;
1694 
1695   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1696   VPValue *StartV = CanonicalIVPHI->getStartValue();
1697 
1698   // Create the ExplicitVectorLengthPhi recipe in the main loop.
1699   auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
1700   EVLPhi->insertAfter(CanonicalIVPHI);
1701   VPBuilder Builder(Header, Header->getFirstNonPhi());
1702   // Compute original TC - IV as the AVL (application vector length).
1703   VPValue *AVL = Builder.createNaryOp(
1704       Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
1705   if (MaxSafeElements) {
1706     // Clamp the AVL to MaxSafeElements to respect the maximum safe distance.
1707     VPValue *AVLSafe = Plan.getOrAddLiveIn(
1708         ConstantInt::get(CanonicalIVPHI->getScalarType(), *MaxSafeElements));
1709     VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
1710     AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc(), "safe_avl");
1711   }
1712   auto *VPEVL = Builder.createNaryOp(VPInstruction::ExplicitVectorLength, AVL,
1713                                      DebugLoc());
1714 
1715   auto *CanonicalIVIncrement =
1716       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1717   VPSingleDefRecipe *OpVPEVL = VPEVL;
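       // ExplicitVectorLength produces an i32; if the canonical IV type has a
       // different width, cast the EVL before adding it to the EVL-based IV.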
1718   if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
1719       IVSize != 32) {
1720     OpVPEVL = new VPScalarCastRecipe(
1721         IVSize < 32 ? Instruction::Trunc : Instruction::ZExt, OpVPEVL,
1722         CanonicalIVPHI->getScalarType(), CanonicalIVIncrement->getDebugLoc());
1723     OpVPEVL->insertBefore(CanonicalIVIncrement);
1724   }
1725   auto *NextEVLIV =
1726       new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
1727                         {CanonicalIVIncrement->hasNoUnsignedWrap(),
1728                          CanonicalIVIncrement->hasNoSignedWrap()},
1729                         CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
1730   NextEVLIV->insertBefore(CanonicalIVIncrement);
1731   EVLPhi->addOperand(NextEVLIV);
1732 
1733   transformRecipestoEVLRecipes(Plan, *VPEVL);
1734 
1735   // Replace all uses of VPCanonicalIVPHIRecipe by
1736   // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
1737   CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
1738   CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
1739   // TODO: support unroll factor > 1.
1740   Plan.setUF(1);
1741   return true;
1742 }
1743 
1744 void VPlanTransforms::dropPoisonGeneratingRecipes(
1745     VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1746   // Collect recipes in the backward slice of `Root` that may generate a poison
1747   // value that is used after vectorization.
1748   SmallPtrSet<VPRecipeBase *, 16> Visited;
1749   auto CollectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1750     SmallVector<VPRecipeBase *, 16> Worklist;
1751     Worklist.push_back(Root);
1752 
1753     // Traverse the backward slice of Root through its use-def chain.
1754     while (!Worklist.empty()) {
1755       VPRecipeBase *CurRec = Worklist.pop_back_val();
1756 
1757       if (!Visited.insert(CurRec).second)
1758         continue;
1759 
1760       // Prune search if we find another recipe generating a widen memory
1761       // instruction. Widen memory instructions involved in address computation
1762       // will lead to gather/scatter instructions, which don't need to be
1763       // handled.
1764       if (isa<VPWidenMemoryRecipe, VPInterleaveRecipe, VPScalarIVStepsRecipe,
1765               VPHeaderPHIRecipe>(CurRec))
1766         continue;
1767 
1768       // This recipe contributes to the address computation of a widen
1769       // load/store. If the underlying instruction has poison-generating flags,
1770       // drop them directly.
1771       if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1772         VPValue *A, *B;
1773         using namespace llvm::VPlanPatternMatch;
1774         // Dropping disjoint from an OR may yield incorrect results, as some
1775         // analysis may have converted it to an Add implicitly (e.g. SCEV used
1776         // for dependence analysis). Instead, replace it with an equivalent Add.
1777         // This is possible as all users of the disjoint OR only access lanes
1778         // where the operands are disjoint or poison otherwise.
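             // E.g. (illustrative IR):
             //   %r = or disjoint %a, %b  -->  %r = add %a, %b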
1779         if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
1780             RecWithFlags->isDisjoint()) {
1781           VPBuilder Builder(RecWithFlags);
1782           VPInstruction *New = Builder.createOverflowingOp(
1783               Instruction::Add, {A, B}, {false, false},
1784               RecWithFlags->getDebugLoc());
1785           New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
1786           RecWithFlags->replaceAllUsesWith(New);
1787           RecWithFlags->eraseFromParent();
1788           CurRec = New;
1789         } else
1790           RecWithFlags->dropPoisonGeneratingFlags();
1791       } else {
1792         Instruction *Instr = dyn_cast_or_null<Instruction>(
1793             CurRec->getVPSingleValue()->getUnderlyingValue());
1794         (void)Instr;
1795         assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1796                "found instruction with poison generating flags not covered by "
1797                "VPRecipeWithIRFlags");
1798       }
1799 
1800       // Add new definitions to the worklist.
1801       for (VPValue *Operand : CurRec->operands())
1802         if (VPRecipeBase *OpDef = Operand->getDefiningRecipe())
1803           Worklist.push_back(OpDef);
1804     }
1805   });
1806 
1807   // Traverse all the recipes in the VPlan and collect the poison-generating
1808   // recipes in the backward slice starting at the address of a
1809   // VPWidenMemoryRecipe or VPInterleaveRecipe.
1810   auto Iter = vp_depth_first_deep(Plan.getEntry());
1811   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1812     for (VPRecipeBase &Recipe : *VPBB) {
1813       if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1814         Instruction &UnderlyingInstr = WidenRec->getIngredient();
1815         VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1816         if (AddrDef && WidenRec->isConsecutive() &&
1817             BlockNeedsPredication(UnderlyingInstr.getParent()))
1818           CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1819       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1820         VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1821         if (AddrDef) {
1822           // Check if any member of the interleave group needs predication.
1823           const InterleaveGroup<Instruction> *InterGroup =
1824               InterleaveRec->getInterleaveGroup();
1825           bool NeedPredication = false;
1826           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1827                I < NumMembers; ++I) {
1828             Instruction *Member = InterGroup->getMember(I);
1829             if (Member)
1830               NeedPredication |= BlockNeedsPredication(Member->getParent());
1831           }
1832 
1833           if (NeedPredication)
1834             CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1835         }
1836       }
1837     }
1838   }
1839 }
1840 
1841 void VPlanTransforms::createInterleaveGroups(
1842     VPlan &Plan,
1843     const SmallPtrSetImpl<const InterleaveGroup<Instruction> *>
1844         &InterleaveGroups,
1845     VPRecipeBuilder &RecipeBuilder, bool ScalarEpilogueAllowed) {
1846   if (InterleaveGroups.empty())
1847     return;
1848 
1849   // Interleave memory: for each Interleave Group we marked earlier as relevant
1850   // for this VPlan, replace the Recipes widening its memory instructions with a
1851   // single VPInterleaveRecipe at its insertion point.
1852   VPDominatorTree VPDT;
1853   VPDT.recalculate(Plan);
1854   for (const auto *IG : InterleaveGroups) {
1855     SmallVector<VPValue *, 4> StoredValues;
1856     for (unsigned i = 0; i < IG->getFactor(); ++i)
1857       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
1858         auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
1859         StoredValues.push_back(StoreR->getStoredValue());
1860       }
1861 
1862     bool NeedsMaskForGaps =
1863         IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;
1864 
1865     Instruction *IRInsertPos = IG->getInsertPos();
1866     auto *InsertPos =
1867         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IRInsertPos));
1868 
1869     // Get or create the start address for the interleave group.
1870     auto *Start =
1871         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getMember(0)));
1872     VPValue *Addr = Start->getAddr();
1873     VPRecipeBase *AddrDef = Addr->getDefiningRecipe();
1874     if (AddrDef && !VPDT.properlyDominates(AddrDef, InsertPos)) {
1875       // TODO: Hoist Addr's defining recipe (and any operands as needed) to
1876       // InsertPos or sink loads above zero members to join it.
1877       bool InBounds = false;
1878       if (auto *Gep = dyn_cast<GetElementPtrInst>(
1879               getLoadStorePointerOperand(IRInsertPos)->stripPointerCasts()))
1880         InBounds = Gep->isInBounds();
1881 
1882       // We cannot re-use the address of member zero because it does not
1883       // dominate the insert position. Instead, use the address of the insert
1884       // position and create a PtrAdd adjusting it to the address of member
1885       // zero.
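           // E.g. (illustrative): for i32 members with the insert position at
           // member index 2, the offset is 4 * 2 = 8 bytes and member zero's
           // address is computed as ptradd %insertpos.addr, -8.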
1886       assert(IG->getIndex(IRInsertPos) != 0 &&
1887              "index of insert position shouldn't be zero");
1888       auto &DL = IRInsertPos->getDataLayout();
1889       APInt Offset(32,
1890                    DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
1891                        IG->getIndex(IRInsertPos),
1892                    /*IsSigned=*/true);
1893       VPValue *OffsetVPV = Plan.getOrAddLiveIn(
1894           ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset));
1895       VPBuilder B(InsertPos);
1896       Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV)
1897                       : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV);
1898     }
1899     auto *VPIG = new VPInterleaveRecipe(IG, Addr, StoredValues,
1900                                         InsertPos->getMask(), NeedsMaskForGaps);
1901     VPIG->insertBefore(InsertPos);
1902 
1903     unsigned J = 0;
1904     for (unsigned i = 0; i < IG->getFactor(); ++i)
1905       if (Instruction *Member = IG->getMember(i)) {
1906         VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
1907         if (!Member->getType()->isVoidTy()) {
1908           VPValue *OriginalV = MemberR->getVPSingleValue();
1909           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
1910           J++;
1911         }
1912         MemberR->eraseFromParent();
1913       }
1914   }
1915 }
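     // Lowers abstract IV phis to concrete scalar phis; e.g. (illustrative
     // VPlan spelling) a canonical IV phi becomes
     //   SCALAR-PHI %index = phi [ %start, vector.ph ], [ %index.next, ... ]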
1916 
1917 void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
1918   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1919            vp_depth_first_deep(Plan.getEntry()))) {
1920     for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
1921       if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
1922         continue;
1923       auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
1924       StringRef Name =
1925           isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
1926       auto *ScalarR =
1927           new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
1928                                 PhiR->getDebugLoc(), Name);
1929       ScalarR->insertBefore(PhiR);
1930       PhiR->replaceAllUsesWith(ScalarR);
1931       PhiR->eraseFromParent();
1932     }
1933   }
1934 }
1935 
1936 void VPlanTransforms::handleUncountableEarlyExit(
1937     VPlan &Plan, ScalarEvolution &SE, Loop *OrigLoop,
1938     BasicBlock *UncountableExitingBlock, VPRecipeBuilder &RecipeBuilder) {
1939   VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1940   auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
1941   VPBuilder Builder(LatchVPBB->getTerminator());
1942   auto *MiddleVPBB = Plan.getMiddleBlock();
1943   VPValue *IsEarlyExitTaken = nullptr;
1944 
1945   // Process the uncountable exiting block. Update IsEarlyExitTaken, which
1946   // tracks if the uncountable early exit has been taken. Also split the middle
1947   // block and have it conditionally branch to the early exit block if
1948   // EarlyExitTaken.
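       // Sketch of the result (illustrative):
       //   middle.split:
       //     EMIT branch-on-cond %IsEarlyExitTaken
       //   successors: early-exit VPIRBasicBlock (if true), middle.block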
1949   auto *EarlyExitingBranch =
1950       cast<BranchInst>(UncountableExitingBlock->getTerminator());
1951   BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
1952   BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);
1953 
1954   // The early exit block may or may not be the same as the "countable" exit
1955   // block. Create a new VPIRBB for the early exit block in case it is distinct
1956   // from the countable exit block.
1957   // TODO: Introduce both exit blocks during VPlan skeleton construction.
1958   VPIRBasicBlock *VPEarlyExitBlock;
1959   if (OrigLoop->getUniqueExitBlock()) {
1960     VPEarlyExitBlock = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]);
1961   } else {
1962     VPEarlyExitBlock = Plan.createVPIRBasicBlock(
1963         !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1964   }
1965 
1966   VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
1967       OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1968   auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
1969   IsEarlyExitTaken =
1970       Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});
1971 
1972   VPBasicBlock *NewMiddle = Plan.createVPBasicBlock("middle.split");
1973   VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
1974   VPBlockUtils::connectBlocks(NewMiddle, VPEarlyExitBlock);
1975   NewMiddle->swapSuccessors();
1976 
1977   VPBuilder MiddleBuilder(NewMiddle);
1978   MiddleBuilder.createNaryOp(VPInstruction::BranchOnCond, {IsEarlyExitTaken});
1979 
1980   // Replace the condition controlling the non-early exit from the vector loop
1981   // with one exiting if either the original condition of the vector latch is
1982   // true or the early exit has been taken.
1983   auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
1984   assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
1985          "Unexpected terminator");
1986   auto *IsLatchExitTaken =
1987       Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
1988                          LatchExitingBranch->getOperand(1));
1989   auto *AnyExitTaken = Builder.createNaryOp(
1990       Instruction::Or, {IsEarlyExitTaken, IsLatchExitTaken});
1991   Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
1992   LatchExitingBranch->eraseFromParent();
1993 }
1994