1 //===-- VPlanTransforms.cpp - Utility VPlan to VPlan transforms -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements a set of utility VPlan to VPlan transformations.
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "VPlanTransforms.h"
15 #include "VPRecipeBuilder.h"
16 #include "VPlan.h"
17 #include "VPlanAnalysis.h"
18 #include "VPlanCFG.h"
19 #include "VPlanDominatorTree.h"
20 #include "VPlanPatternMatch.h"
21 #include "VPlanUtils.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/TypeSwitch.h"
26 #include "llvm/Analysis/IVDescriptors.h"
27 #include "llvm/Analysis/VectorUtils.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/PatternMatch.h"
30 
31 using namespace llvm;
32 
33 void VPlanTransforms::VPInstructionsToVPRecipes(
34     VPlanPtr &Plan,
35     function_ref<const InductionDescriptor *(PHINode *)>
36         GetIntOrFpInductionDescriptor,
37     ScalarEvolution &SE, const TargetLibraryInfo &TLI) {
38 
39   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
40       Plan->getVectorLoopRegion());
41   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
42     // Skip blocks outside region
43     if (!VPBB->getParent())
44       break;
45     VPRecipeBase *Term = VPBB->getTerminator();
46     auto EndIter = Term ? Term->getIterator() : VPBB->end();
47     // Introduce each ingredient into VPlan.
48     for (VPRecipeBase &Ingredient :
49          make_early_inc_range(make_range(VPBB->begin(), EndIter))) {
50 
51       VPValue *VPV = Ingredient.getVPSingleValue();
52       Instruction *Inst = cast<Instruction>(VPV->getUnderlyingValue());
53 
54       VPRecipeBase *NewRecipe = nullptr;
55       if (auto *VPPhi = dyn_cast<VPWidenPHIRecipe>(&Ingredient)) {
56         auto *Phi = cast<PHINode>(VPPhi->getUnderlyingValue());
57         const auto *II = GetIntOrFpInductionDescriptor(Phi);
58         if (!II)
59           continue;
60 
61         VPValue *Start = Plan->getOrAddLiveIn(II->getStartValue());
62         VPValue *Step =
63             vputils::getOrCreateVPValueForSCEVExpr(*Plan, II->getStep(), SE);
64         NewRecipe = new VPWidenIntOrFpInductionRecipe(
65             Phi, Start, Step, &Plan->getVF(), *II, Ingredient.getDebugLoc());
66       } else {
67         assert(isa<VPInstruction>(&Ingredient) &&
68                "only VPInstructions expected here");
69         assert(!isa<PHINode>(Inst) && "phis should be handled above");
70         // Create VPWidenMemoryRecipe for loads and stores.
71         if (LoadInst *Load = dyn_cast<LoadInst>(Inst)) {
72           NewRecipe = new VPWidenLoadRecipe(
73               *Load, Ingredient.getOperand(0), nullptr /*Mask*/,
74               false /*Consecutive*/, false /*Reverse*/,
75               Ingredient.getDebugLoc());
76         } else if (StoreInst *Store = dyn_cast<StoreInst>(Inst)) {
77           NewRecipe = new VPWidenStoreRecipe(
78               *Store, Ingredient.getOperand(1), Ingredient.getOperand(0),
79               nullptr /*Mask*/, false /*Consecutive*/, false /*Reverse*/,
80               Ingredient.getDebugLoc());
81         } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
82           NewRecipe = new VPWidenGEPRecipe(GEP, Ingredient.operands());
83         } else if (CallInst *CI = dyn_cast<CallInst>(Inst)) {
84           NewRecipe = new VPWidenIntrinsicRecipe(
85               *CI, getVectorIntrinsicIDForCall(CI, &TLI),
86               {Ingredient.op_begin(), Ingredient.op_end() - 1}, CI->getType(),
87               CI->getDebugLoc());
88         } else if (SelectInst *SI = dyn_cast<SelectInst>(Inst)) {
89           NewRecipe = new VPWidenSelectRecipe(*SI, Ingredient.operands());
90         } else if (auto *CI = dyn_cast<CastInst>(Inst)) {
91           NewRecipe = new VPWidenCastRecipe(
92               CI->getOpcode(), Ingredient.getOperand(0), CI->getType(), *CI);
93         } else {
94           NewRecipe = new VPWidenRecipe(*Inst, Ingredient.operands());
95         }
96       }
97 
98       NewRecipe->insertBefore(&Ingredient);
99       if (NewRecipe->getNumDefinedValues() == 1)
100         VPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
101       else
102         assert(NewRecipe->getNumDefinedValues() == 0 &&
103                "Only recipes with zero or one defined values expected");
104       Ingredient.eraseFromParent();
105     }
106   }
107 }
108 
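/// Sink recipes whose only users are inside a replicate region into that
/// region, so they are only computed for the lanes where the predicate is
/// true. A minimal illustrative sketch (value and block names made up):
///
///   vector.body:
///     %mul = mul %a, %b
///   pred.store.if:
///     REPLICATE store %mul, %addr
///
/// Here %mul is moved into pred.store.if. If %mul also had users outside the
/// region that only use its first lane, a uniform clone of %mul would be
/// created for those users instead of blocking the sink.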
109 static bool sinkScalarOperands(VPlan &Plan) {
110   auto Iter = vp_depth_first_deep(Plan.getEntry());
111   bool Changed = false;
112   // First, collect the operands of all recipes in replicate blocks as seeds for
113   // sinking.
114   SetVector<std::pair<VPBasicBlock *, VPSingleDefRecipe *>> WorkList;
115   for (VPRegionBlock *VPR : VPBlockUtils::blocksOnly<VPRegionBlock>(Iter)) {
116     VPBasicBlock *EntryVPBB = VPR->getEntryBasicBlock();
117     if (!VPR->isReplicator() || EntryVPBB->getSuccessors().size() != 2)
118       continue;
119     VPBasicBlock *VPBB = dyn_cast<VPBasicBlock>(EntryVPBB->getSuccessors()[0]);
120     if (!VPBB || VPBB->getSingleSuccessor() != VPR->getExitingBasicBlock())
121       continue;
122     for (auto &Recipe : *VPBB) {
123       for (VPValue *Op : Recipe.operands())
124         if (auto *Def =
125                 dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
126           WorkList.insert(std::make_pair(VPBB, Def));
127     }
128   }
129 
130   bool ScalarVFOnly = Plan.hasScalarVFOnly();
131   // Try to sink each replicate or scalar IV steps recipe in the worklist.
132   for (unsigned I = 0; I != WorkList.size(); ++I) {
133     VPBasicBlock *SinkTo;
134     VPSingleDefRecipe *SinkCandidate;
135     std::tie(SinkTo, SinkCandidate) = WorkList[I];
136     if (SinkCandidate->getParent() == SinkTo ||
137         SinkCandidate->mayHaveSideEffects() ||
138         SinkCandidate->mayReadOrWriteMemory())
139       continue;
140     if (auto *RepR = dyn_cast<VPReplicateRecipe>(SinkCandidate)) {
141       if (!ScalarVFOnly && RepR->isUniform())
142         continue;
143     } else if (!isa<VPScalarIVStepsRecipe>(SinkCandidate))
144       continue;
145 
146     bool NeedsDuplicating = false;
147     // All recipe users of the sink candidate must be in the same block SinkTo
148     // or all users outside of SinkTo must be uniform-after-vectorization
149     // (i.e., only the first lane is used). In the latter case, we need to
150     // duplicate SinkCandidate.
151     auto CanSinkWithUser = [SinkTo, &NeedsDuplicating,
152                             SinkCandidate](VPUser *U) {
153       auto *UI = cast<VPRecipeBase>(U);
154       if (UI->getParent() == SinkTo)
155         return true;
156       NeedsDuplicating = UI->onlyFirstLaneUsed(SinkCandidate);
157       // We only know how to duplicate VPReplicateRecipes for now.
158       return NeedsDuplicating && isa<VPReplicateRecipe>(SinkCandidate);
159     };
160     if (!all_of(SinkCandidate->users(), CanSinkWithUser))
161       continue;
162 
163     if (NeedsDuplicating) {
164       if (ScalarVFOnly)
165         continue;
166       Instruction *I = SinkCandidate->getUnderlyingInstr();
167       auto *Clone = new VPReplicateRecipe(I, SinkCandidate->operands(), true);
168       // TODO: add ".cloned" suffix to name of Clone's VPValue.
169 
170       Clone->insertBefore(SinkCandidate);
171       SinkCandidate->replaceUsesWithIf(Clone, [SinkTo](VPUser &U, unsigned) {
172         return cast<VPRecipeBase>(&U)->getParent() != SinkTo;
173       });
174     }
175     SinkCandidate->moveBefore(*SinkTo, SinkTo->getFirstNonPhi());
176     for (VPValue *Op : SinkCandidate->operands())
177       if (auto *Def =
178               dyn_cast_or_null<VPSingleDefRecipe>(Op->getDefiningRecipe()))
179         WorkList.insert(std::make_pair(SinkTo, Def));
180     Changed = true;
181   }
182   return Changed;
183 }
184 
185 /// If \p R is a region with a VPBranchOnMaskRecipe in the entry block, return
186 /// the mask.
187 VPValue *getPredicatedMask(VPRegionBlock *R) {
188   auto *EntryBB = dyn_cast<VPBasicBlock>(R->getEntry());
189   if (!EntryBB || EntryBB->size() != 1 ||
190       !isa<VPBranchOnMaskRecipe>(EntryBB->begin()))
191     return nullptr;
192 
193   return cast<VPBranchOnMaskRecipe>(&*EntryBB->begin())->getOperand(0);
194 }
195 
196 /// If \p R is a triangle region, return the 'then' block of the triangle.
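/// For example (illustrative), a triangle region has the shape
///
///   entry --> then --> continue
///   entry ------------> continue
///
/// where the entry block branches on a mask and both paths rejoin in the
/// 'continue' block.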
197 static VPBasicBlock *getPredicatedThenBlock(VPRegionBlock *R) {
198   auto *EntryBB = cast<VPBasicBlock>(R->getEntry());
199   if (EntryBB->getNumSuccessors() != 2)
200     return nullptr;
201 
202   auto *Succ0 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[0]);
203   auto *Succ1 = dyn_cast<VPBasicBlock>(EntryBB->getSuccessors()[1]);
204   if (!Succ0 || !Succ1)
205     return nullptr;
206 
207   if (Succ0->getNumSuccessors() + Succ1->getNumSuccessors() != 1)
208     return nullptr;
209   if (Succ0->getSingleSuccessor() == Succ1)
210     return Succ0;
211   if (Succ1->getSingleSuccessor() == Succ0)
212     return Succ1;
213   return nullptr;
214 }
215 
216 // Merge replicate regions in their successor region, if a replicate region
217 // is connected to a successor replicate region with the same predicate by a
218 // single, empty VPBasicBlock.
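//
// For example (illustrative): two replicate regions guarded by the same mask
// %m and separated only by an empty VPBasicBlock
//
//   pred.load (mask %m) -> bb.empty -> pred.store (mask %m)
//
// are merged by moving the recipes of pred.load's 'then' and merge blocks into
// the corresponding blocks of pred.store and deleting pred.load.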
219 static bool mergeReplicateRegionsIntoSuccessors(VPlan &Plan) {
220   SetVector<VPRegionBlock *> DeletedRegions;
221 
222   // Collect replicate regions followed by an empty block, followed by another
223   // replicate region with a matching mask, to process up front. This avoids
224   // iterator invalidation issues while merging regions.
225   SmallVector<VPRegionBlock *, 8> WorkList;
226   for (VPRegionBlock *Region1 : VPBlockUtils::blocksOnly<VPRegionBlock>(
227            vp_depth_first_deep(Plan.getEntry()))) {
228     if (!Region1->isReplicator())
229       continue;
230     auto *MiddleBasicBlock =
231         dyn_cast_or_null<VPBasicBlock>(Region1->getSingleSuccessor());
232     if (!MiddleBasicBlock || !MiddleBasicBlock->empty())
233       continue;
234 
235     auto *Region2 =
236         dyn_cast_or_null<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
237     if (!Region2 || !Region2->isReplicator())
238       continue;
239 
240     VPValue *Mask1 = getPredicatedMask(Region1);
241     VPValue *Mask2 = getPredicatedMask(Region2);
242     if (!Mask1 || Mask1 != Mask2)
243       continue;
244 
245     assert(Mask1 && Mask2 && "both regions must have conditions");
246     WorkList.push_back(Region1);
247   }
248 
249   // Move recipes from Region1 to its successor region, if both are triangles.
250   for (VPRegionBlock *Region1 : WorkList) {
251     if (DeletedRegions.contains(Region1))
252       continue;
253     auto *MiddleBasicBlock = cast<VPBasicBlock>(Region1->getSingleSuccessor());
254     auto *Region2 = cast<VPRegionBlock>(MiddleBasicBlock->getSingleSuccessor());
255 
256     VPBasicBlock *Then1 = getPredicatedThenBlock(Region1);
257     VPBasicBlock *Then2 = getPredicatedThenBlock(Region2);
258     if (!Then1 || !Then2)
259       continue;
260 
261     // Note: No fusion-preventing memory dependencies are expected in either
262     // region. Such dependencies should be rejected during earlier dependence
263     // checks, which guarantee accesses can be re-ordered for vectorization.
264     //
265     // Move recipes to the successor region.
266     for (VPRecipeBase &ToMove : make_early_inc_range(reverse(*Then1)))
267       ToMove.moveBefore(*Then2, Then2->getFirstNonPhi());
268 
269     auto *Merge1 = cast<VPBasicBlock>(Then1->getSingleSuccessor());
270     auto *Merge2 = cast<VPBasicBlock>(Then2->getSingleSuccessor());
271 
272     // Move VPPredInstPHIRecipes from the merge block to the successor region's
273     // merge block. Update all users inside the successor region to use the
274     // original values.
275     for (VPRecipeBase &Phi1ToMove : make_early_inc_range(reverse(*Merge1))) {
276       VPValue *PredInst1 =
277           cast<VPPredInstPHIRecipe>(&Phi1ToMove)->getOperand(0);
278       VPValue *Phi1ToMoveV = Phi1ToMove.getVPSingleValue();
279       Phi1ToMoveV->replaceUsesWithIf(PredInst1, [Then2](VPUser &U, unsigned) {
280         return cast<VPRecipeBase>(&U)->getParent() == Then2;
281       });
282 
283       // Remove phi recipes that are unused after merging the regions.
284       if (Phi1ToMove.getVPSingleValue()->getNumUsers() == 0) {
285         Phi1ToMove.eraseFromParent();
286         continue;
287       }
288       Phi1ToMove.moveBefore(*Merge2, Merge2->begin());
289     }
290 
291     // Finally, remove the first region.
292     for (VPBlockBase *Pred : make_early_inc_range(Region1->getPredecessors())) {
293       VPBlockUtils::disconnectBlocks(Pred, Region1);
294       VPBlockUtils::connectBlocks(Pred, MiddleBasicBlock);
295     }
296     VPBlockUtils::disconnectBlocks(Region1, MiddleBasicBlock);
297     DeletedRegions.insert(Region1);
298   }
299 
300   for (VPRegionBlock *ToDelete : DeletedRegions)
301     delete ToDelete;
302   return !DeletedRegions.empty();
303 }
304 
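/// Build a triangular if-then region wrapping the predicated recipe \p
/// PredRecipe. For a predicated sdiv, for example, the resulting region looks
/// like this (illustrative; recipe names follow the usual VPlan dump format):
///
///   pred.sdiv.entry:
///     BRANCH-ON-MASK %mask
///   pred.sdiv.if:
///     REPLICATE %d = sdiv %a, %b        ; mask operand dropped
///   pred.sdiv.continue:
///     PHI-PREDICATED-INSTRUCTION %d.phi ; only created if %d has users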
305 static VPRegionBlock *createReplicateRegion(VPReplicateRecipe *PredRecipe,
306                                             VPlan &Plan) {
307   Instruction *Instr = PredRecipe->getUnderlyingInstr();
308   // Build the triangular if-then region.
309   std::string RegionName = (Twine("pred.") + Instr->getOpcodeName()).str();
310   assert(Instr->getParent() && "Predicated instruction not in any basic block");
311   auto *BlockInMask = PredRecipe->getMask();
312   auto *BOMRecipe = new VPBranchOnMaskRecipe(BlockInMask);
313   auto *Entry = new VPBasicBlock(Twine(RegionName) + ".entry", BOMRecipe);
314 
315   // Replace predicated replicate recipe with a replicate recipe without a
316   // mask but in the replicate region.
317   auto *RecipeWithoutMask = new VPReplicateRecipe(
318       PredRecipe->getUnderlyingInstr(),
319       make_range(PredRecipe->op_begin(), std::prev(PredRecipe->op_end())),
320       PredRecipe->isUniform());
321   auto *Pred = new VPBasicBlock(Twine(RegionName) + ".if", RecipeWithoutMask);
322 
323   VPPredInstPHIRecipe *PHIRecipe = nullptr;
324   if (PredRecipe->getNumUsers() != 0) {
325     PHIRecipe = new VPPredInstPHIRecipe(RecipeWithoutMask,
326                                         RecipeWithoutMask->getDebugLoc());
327     PredRecipe->replaceAllUsesWith(PHIRecipe);
328     PHIRecipe->setOperand(0, RecipeWithoutMask);
329   }
330   PredRecipe->eraseFromParent();
331   auto *Exiting = new VPBasicBlock(Twine(RegionName) + ".continue", PHIRecipe);
332   VPRegionBlock *Region = new VPRegionBlock(Entry, Exiting, RegionName, true);
333 
334   // Note: first set Entry as region entry and then connect successors starting
335   // from it in order, to propagate the "parent" of each VPBasicBlock.
336   VPBlockUtils::insertTwoBlocksAfter(Pred, Exiting, Entry);
337   VPBlockUtils::connectBlocks(Pred, Exiting);
338 
339   return Region;
340 }
341 
342 static void addReplicateRegions(VPlan &Plan) {
343   SmallVector<VPReplicateRecipe *> WorkList;
344   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
345            vp_depth_first_deep(Plan.getEntry()))) {
346     for (VPRecipeBase &R : *VPBB)
347       if (auto *RepR = dyn_cast<VPReplicateRecipe>(&R)) {
348         if (RepR->isPredicated())
349           WorkList.push_back(RepR);
350       }
351   }
352 
353   unsigned BBNum = 0;
354   for (VPReplicateRecipe *RepR : WorkList) {
355     VPBasicBlock *CurrentBlock = RepR->getParent();
356     VPBasicBlock *SplitBlock = CurrentBlock->splitAt(RepR->getIterator());
357 
358     BasicBlock *OrigBB = RepR->getUnderlyingInstr()->getParent();
359     SplitBlock->setName(
360         OrigBB->hasName() ? OrigBB->getName() + "." + Twine(BBNum++) : "");
361     // Create a replicate region for RepR and insert it on the split edge.
362     VPBlockBase *Region = createReplicateRegion(RepR, Plan);
363     Region->setParent(CurrentBlock->getParent());
364     VPBlockUtils::insertOnEdge(CurrentBlock, SplitBlock, Region);
365   }
366 }
367 
368 /// Remove redundant VPBasicBlocks by merging them into their predecessor if
369 /// the predecessor has a single successor.
370 static bool mergeBlocksIntoPredecessors(VPlan &Plan) {
371   SmallVector<VPBasicBlock *> WorkList;
372   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
373            vp_depth_first_deep(Plan.getEntry()))) {
374     // Don't fold the blocks in the skeleton of the Plan into their single
375     // predecessors for now.
376     // TODO: Remove restriction once more of the skeleton is modeled in VPlan.
377     if (!VPBB->getParent())
378       continue;
379     auto *PredVPBB =
380         dyn_cast_or_null<VPBasicBlock>(VPBB->getSinglePredecessor());
381     if (!PredVPBB || PredVPBB->getNumSuccessors() != 1 ||
382         isa<VPIRBasicBlock>(PredVPBB))
383       continue;
384     WorkList.push_back(VPBB);
385   }
386 
387   for (VPBasicBlock *VPBB : WorkList) {
388     VPBasicBlock *PredVPBB = cast<VPBasicBlock>(VPBB->getSinglePredecessor());
389     for (VPRecipeBase &R : make_early_inc_range(*VPBB))
390       R.moveBefore(*PredVPBB, PredVPBB->end());
391     VPBlockUtils::disconnectBlocks(PredVPBB, VPBB);
392     auto *ParentRegion = cast_or_null<VPRegionBlock>(VPBB->getParent());
393     if (ParentRegion && ParentRegion->getExiting() == VPBB)
394       ParentRegion->setExiting(PredVPBB);
395     for (auto *Succ : to_vector(VPBB->successors())) {
396       VPBlockUtils::disconnectBlocks(VPBB, Succ);
397       VPBlockUtils::connectBlocks(PredVPBB, Succ);
398     }
399     delete VPBB;
400   }
401   return !WorkList.empty();
402 }
403 
404 void VPlanTransforms::createAndOptimizeReplicateRegions(VPlan &Plan) {
405   // Convert masked VPReplicateRecipes to if-then region blocks.
406   addReplicateRegions(Plan);
407 
408   bool ShouldSimplify = true;
409   while (ShouldSimplify) {
410     ShouldSimplify = sinkScalarOperands(Plan);
411     ShouldSimplify |= mergeReplicateRegionsIntoSuccessors(Plan);
412     ShouldSimplify |= mergeBlocksIntoPredecessors(Plan);
413   }
414 }
415 
416 /// Remove redundant casts of inductions.
417 ///
418 /// Such redundant casts are casts of induction variables that can be ignored,
419 /// because we already proved that the casted phi is equal to the uncasted phi
420 /// in the vectorized loop. There is no need to vectorize the cast - the same
421 /// value can be used for both the phi and casts in the vector loop.
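///
/// For example (illustrative): if the induction descriptor recorded the cast
/// chain {%ext} for the induction %iv, the recipe for %ext is located and all
/// of its users are redirected to the widened %iv:
///
///   %iv  = WIDEN-INDUCTION %start, %step
///   %ext = sext %iv            ; proven equal to %iv, bypassed
///   %use = add %ext, 1   -->   %use = add %iv, 1
///
/// The now-dead cast recipes are cleaned up by later dead-recipe removal.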
422 static void removeRedundantInductionCasts(VPlan &Plan) {
423   for (auto &Phi : Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
424     auto *IV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
425     if (!IV || IV->getTruncInst())
426       continue;
427 
428     // A sequence of IR Casts has potentially been recorded for IV, which
429     // *must be bypassed* when the IV is vectorized, because the vectorized IV
430     // will produce the desired casted value. This sequence forms a def-use
431     // chain and is provided in reverse order, ending with the cast that uses
432     // the IV phi. Search for the recipe of the last cast in the chain and
433     // replace it with the original IV. Note that only the final cast is
434     // expected to have users outside the cast-chain and the dead casts left
435     // over will be cleaned up later.
436     auto &Casts = IV->getInductionDescriptor().getCastInsts();
437     VPValue *FindMyCast = IV;
438     for (Instruction *IRCast : reverse(Casts)) {
439       VPSingleDefRecipe *FoundUserCast = nullptr;
440       for (auto *U : FindMyCast->users()) {
441         auto *UserCast = dyn_cast<VPSingleDefRecipe>(U);
442         if (UserCast && UserCast->getUnderlyingValue() == IRCast) {
443           FoundUserCast = UserCast;
444           break;
445         }
446       }
447       FindMyCast = FoundUserCast;
448     }
449     FindMyCast->replaceAllUsesWith(IV);
450   }
451 }
452 
453 /// Try to replace VPWidenCanonicalIVRecipes with a widened canonical IV
454 /// recipe, if it exists.
455 static void removeRedundantCanonicalIVs(VPlan &Plan) {
456   VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
457   VPWidenCanonicalIVRecipe *WidenNewIV = nullptr;
458   for (VPUser *U : CanonicalIV->users()) {
459     WidenNewIV = dyn_cast<VPWidenCanonicalIVRecipe>(U);
460     if (WidenNewIV)
461       break;
462   }
463 
464   if (!WidenNewIV)
465     return;
466 
467   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
468   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
469     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
470 
471     if (!WidenOriginalIV || !WidenOriginalIV->isCanonical())
472       continue;
473 
474     // Replace WidenNewIV with WidenOriginalIV if WidenOriginalIV provides
475     // everything WidenNewIV's users need. That is, WidenOriginalIV will
476     // generate a vector phi or all users of WidenNewIV demand the first lane
477     // only.
478     if (any_of(WidenOriginalIV->users(),
479                [WidenOriginalIV](VPUser *U) {
480                  return !U->usesScalars(WidenOriginalIV);
481                }) ||
482         vputils::onlyFirstLaneUsed(WidenNewIV)) {
483       WidenNewIV->replaceAllUsesWith(WidenOriginalIV);
484       WidenNewIV->eraseFromParent();
485       return;
486     }
487   }
488 }
489 
490 /// Returns true if \p R is dead and can be removed.
491 static bool isDeadRecipe(VPRecipeBase &R) {
492   using namespace llvm::PatternMatch;
493   // Do remove conditional assume instructions as their conditions may be
494   // flattened.
495   auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
496   bool IsConditionalAssume =
497       RepR && RepR->isPredicated() &&
498       match(RepR->getUnderlyingInstr(), m_Intrinsic<Intrinsic::assume>());
499   if (IsConditionalAssume)
500     return true;
501 
502   if (R.mayHaveSideEffects())
503     return false;
504 
505   // Recipe is dead if no user keeps the recipe alive.
506   return all_of(R.definedValues(),
507                 [](VPValue *V) { return V->getNumUsers() == 0; });
508 }
509 
510 void VPlanTransforms::removeDeadRecipes(VPlan &Plan) {
511   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
512       Plan.getEntry());
513 
514   for (VPBasicBlock *VPBB : reverse(VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT))) {
515     // The recipes in the block are processed in reverse order, to catch chains
516     // of dead recipes.
517     for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
518       if (isDeadRecipe(R))
519         R.eraseFromParent();
520     }
521   }
522 }
523 
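/// Create a scalar induction based on the canonical IV and return a
/// VPScalarIVStepsRecipe producing its per-lane scalar steps. For a
/// non-canonical induction with start \p StartV and step \p Step this emits,
/// illustratively:
///
///   %offset.idx = DERIVED-IV %StartV + %canonical-iv * %Step
///   %steps      = SCALAR-STEPS %offset.idx, %Step
///
/// Truncates of the base IV and of the step are inserted as needed.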
524 static VPScalarIVStepsRecipe *
525 createScalarIVSteps(VPlan &Plan, InductionDescriptor::InductionKind Kind,
526                     Instruction::BinaryOps InductionOpcode,
527                     FPMathOperator *FPBinOp, Instruction *TruncI,
528                     VPValue *StartV, VPValue *Step, VPBuilder &Builder) {
529   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
530   VPCanonicalIVPHIRecipe *CanonicalIV = Plan.getCanonicalIV();
531   VPSingleDefRecipe *BaseIV = CanonicalIV;
532   if (!CanonicalIV->isCanonical(Kind, StartV, Step)) {
533     BaseIV = Builder.createDerivedIV(Kind, FPBinOp, StartV, CanonicalIV, Step,
534                                      "offset.idx");
535   }
536 
537   // Truncate base induction if needed.
538   Type *CanonicalIVType = CanonicalIV->getScalarType();
539   VPTypeAnalysis TypeInfo(CanonicalIVType);
540   Type *ResultTy = TypeInfo.inferScalarType(BaseIV);
541   if (TruncI) {
542     Type *TruncTy = TruncI->getType();
543     assert(ResultTy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits() &&
544            "Not truncating.");
545     assert(ResultTy->isIntegerTy() && "Truncation requires an integer type");
546     BaseIV = Builder.createScalarCast(Instruction::Trunc, BaseIV, TruncTy);
547     ResultTy = TruncTy;
548   }
549 
550   // Truncate step if needed.
551   Type *StepTy = TypeInfo.inferScalarType(Step);
552   if (ResultTy != StepTy) {
553     assert(StepTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits() &&
554            "Not truncating.");
555     assert(StepTy->isIntegerTy() && "Truncation requires an integer type");
556     auto *VecPreheader =
557         cast<VPBasicBlock>(HeaderVPBB->getSingleHierarchicalPredecessor());
558     VPBuilder::InsertPointGuard Guard(Builder);
559     Builder.setInsertPoint(VecPreheader);
560     Step = Builder.createScalarCast(Instruction::Trunc, Step, ResultTy);
561   }
562   return Builder.createScalarIVSteps(InductionOpcode, FPBinOp, BaseIV, Step);
563 }
564 
565 /// Legalize VPWidenPointerInductionRecipe by replacing it with a PtrAdd
566 /// (IndStart, ScalarIVSteps (0, Step)) if only its scalar values are used, as
567 /// VPWidenPointerInductionRecipe will generate vectors only. If some users
568 /// require vectors while others need scalars, the scalar uses need to extract
569 /// the scalars from the generated vectors (note that this is different from how
570 /// int/fp inductions are handled). Also optimize VPWidenIntOrFpInductionRecipe,
571 /// if any of its users needs scalar values, by providing them scalar steps
572 /// built on the canonical scalar IV, updating the original IV's users. This is
573 /// an optional optimization to reduce the need for vector extracts.
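///
/// For example (illustrative), a pointer induction
///
///   %ptr.iv = WIDEN-POINTER-INDUCTION %start, %step
///
/// whose users only need scalar values is replaced by
///
///   %steps    = SCALAR-STEPS 0, %step
///   %next.gep = ptradd %start, %steps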
574 static void legalizeAndOptimizeInductions(VPlan &Plan) {
575   SmallVector<VPRecipeBase *> ToRemove;
576   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
577   bool HasOnlyVectorVFs = !Plan.hasVF(ElementCount::getFixed(1));
578   VPBuilder Builder(HeaderVPBB, HeaderVPBB->getFirstNonPhi());
579   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
580     // Replace wide pointer inductions which have only their scalars used by
581     // PtrAdd(IndStart, ScalarIVSteps (0, Step)).
582     if (auto *PtrIV = dyn_cast<VPWidenPointerInductionRecipe>(&Phi)) {
583       if (!PtrIV->onlyScalarsGenerated(Plan.hasScalableVF()))
584         continue;
585 
586       const InductionDescriptor &ID = PtrIV->getInductionDescriptor();
587       VPValue *StartV =
588           Plan.getOrAddLiveIn(ConstantInt::get(ID.getStep()->getType(), 0));
589       VPValue *StepV = PtrIV->getOperand(1);
590       VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
591           Plan, InductionDescriptor::IK_IntInduction, Instruction::Add, nullptr,
592           nullptr, StartV, StepV, Builder);
593 
594       VPValue *PtrAdd = Builder.createPtrAdd(PtrIV->getStartValue(), Steps,
595                                              PtrIV->getDebugLoc(), "next.gep");
596 
597       PtrIV->replaceAllUsesWith(PtrAdd);
598       continue;
599     }
600 
601     // Replace widened induction with scalar steps for users that only use
602     // scalars.
603     auto *WideIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
604     if (!WideIV)
605       continue;
606     if (HasOnlyVectorVFs && none_of(WideIV->users(), [WideIV](VPUser *U) {
607           return U->usesScalars(WideIV);
608         }))
609       continue;
610 
611     const InductionDescriptor &ID = WideIV->getInductionDescriptor();
612     VPScalarIVStepsRecipe *Steps = createScalarIVSteps(
613         Plan, ID.getKind(), ID.getInductionOpcode(),
614         dyn_cast_or_null<FPMathOperator>(ID.getInductionBinOp()),
615         WideIV->getTruncInst(), WideIV->getStartValue(), WideIV->getStepValue(),
616         Builder);
617 
618     // Update scalar users of IV to use Step instead.
619     if (!HasOnlyVectorVFs)
620       WideIV->replaceAllUsesWith(Steps);
621     else
622       WideIV->replaceUsesWithIf(Steps, [WideIV](VPUser &U, unsigned) {
623         return U.usesScalars(WideIV);
624       });
625   }
626 }
627 
628 /// Remove redundant ExpandSCEVRecipes in \p Plan's entry block by replacing
629 /// them with already existing recipes expanding the same SCEV expression.
630 static void removeRedundantExpandSCEVRecipes(VPlan &Plan) {
631   DenseMap<const SCEV *, VPValue *> SCEV2VPV;
632 
633   for (VPRecipeBase &R :
634        make_early_inc_range(*Plan.getEntry()->getEntryBasicBlock())) {
635     auto *ExpR = dyn_cast<VPExpandSCEVRecipe>(&R);
636     if (!ExpR)
637       continue;
638 
639     auto I = SCEV2VPV.insert({ExpR->getSCEV(), ExpR});
640     if (I.second)
641       continue;
642     ExpR->replaceAllUsesWith(I.first->second);
643     ExpR->eraseFromParent();
644   }
645 }
646 
647 static void recursivelyDeleteDeadRecipes(VPValue *V) {
648   SmallVector<VPValue *> WorkList;
649   SmallPtrSet<VPValue *, 8> Seen;
650   WorkList.push_back(V);
651 
652   while (!WorkList.empty()) {
653     VPValue *Cur = WorkList.pop_back_val();
654     if (!Seen.insert(Cur).second)
655       continue;
656     VPRecipeBase *R = Cur->getDefiningRecipe();
657     if (!R)
658       continue;
659     if (!isDeadRecipe(*R))
660       continue;
661     WorkList.append(R->op_begin(), R->op_end());
662     R->eraseFromParent();
663   }
664 }
665 
666 void VPlanTransforms::optimizeForVFAndUF(VPlan &Plan, ElementCount BestVF,
667                                          unsigned BestUF,
668                                          PredicatedScalarEvolution &PSE) {
669   assert(Plan.hasVF(BestVF) && "BestVF is not available in Plan");
670   assert(Plan.hasUF(BestUF) && "BestUF is not available in Plan");
671   VPBasicBlock *ExitingVPBB =
672       Plan.getVectorLoopRegion()->getExitingBasicBlock();
673   auto *Term = &ExitingVPBB->back();
674   // Try to simplify the branch condition if TC <= VF * UF when preparing to
675   // execute the plan for the main vector loop. We only do this if the
676   // terminator is:
677   //  1. BranchOnCount, or
678   //  2. BranchOnCond where the input is Not(ActiveLaneMask).
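  //
  // For example (illustrative): with BestVF = 4 and BestUF = 2 and a trip
  // count known to be at most 8, a single vector iteration suffices, so
  //
  //   EMIT branch-on-count %index.next, %vec.tc
  //
  // is replaced with an unconditional
  //
  //   EMIT branch-on-cond true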
679   using namespace llvm::VPlanPatternMatch;
680   if (!match(Term, m_BranchOnCount(m_VPValue(), m_VPValue())) &&
681       !match(Term,
682              m_BranchOnCond(m_Not(m_ActiveLaneMask(m_VPValue(), m_VPValue())))))
683     return;
684 
685   ScalarEvolution &SE = *PSE.getSE();
686   const SCEV *TripCount =
687       vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
688   assert(!isa<SCEVCouldNotCompute>(TripCount) &&
689          "Trip count SCEV must be computable");
690   ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
691   const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
692   if (TripCount->isZero() ||
693       !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
694     return;
695 
696   LLVMContext &Ctx = SE.getContext();
697   auto *BOC = new VPInstruction(
698       VPInstruction::BranchOnCond,
699       {Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx))}, Term->getDebugLoc());
700 
701   SmallVector<VPValue *> PossiblyDead(Term->operands());
702   Term->eraseFromParent();
703   for (VPValue *Op : PossiblyDead)
704     recursivelyDeleteDeadRecipes(Op);
705   ExitingVPBB->appendRecipe(BOC);
706   Plan.setVF(BestVF);
707   Plan.setUF(BestUF);
708   // TODO: Further simplifications are possible
709   //      1. Replace inductions with constants.
710   //      2. Replace vector loop region with VPBasicBlock.
711 }
712 
713 /// Sink users of \p FOR after the recipe defining the previous value \p
714 /// Previous of the recurrence. \returns true if all users of \p FOR could be
715 /// re-arranged as needed or false if it is not possible.
716 static bool
717 sinkRecurrenceUsersAfterPrevious(VPFirstOrderRecurrencePHIRecipe *FOR,
718                                  VPRecipeBase *Previous,
719                                  VPDominatorTree &VPDT) {
720   // Collect recipes that need sinking.
721   SmallVector<VPRecipeBase *> WorkList;
722   SmallPtrSet<VPRecipeBase *, 8> Seen;
723   Seen.insert(Previous);
724   auto TryToPushSinkCandidate = [&](VPRecipeBase *SinkCandidate) {
725     // The previous value must not depend on the users of the recurrence phi. In
726     // that case, FOR is not a fixed order recurrence.
727     if (SinkCandidate == Previous)
728       return false;
729 
730     if (isa<VPHeaderPHIRecipe>(SinkCandidate) ||
731         !Seen.insert(SinkCandidate).second ||
732         VPDT.properlyDominates(Previous, SinkCandidate))
733       return true;
734 
735     if (SinkCandidate->mayHaveSideEffects())
736       return false;
737 
738     WorkList.push_back(SinkCandidate);
739     return true;
740   };
741 
742   // Recursively sink users of FOR after Previous.
743   WorkList.push_back(FOR);
744   for (unsigned I = 0; I != WorkList.size(); ++I) {
745     VPRecipeBase *Current = WorkList[I];
746     assert(Current->getNumDefinedValues() == 1 &&
747            "only recipes with a single defined value expected");
748 
749     for (VPUser *User : Current->getVPSingleValue()->users()) {
750       if (!TryToPushSinkCandidate(cast<VPRecipeBase>(User)))
751         return false;
752     }
753   }
754 
755   // Keep recipes to sink ordered by dominance so earlier instructions are
756   // processed first.
757   sort(WorkList, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
758     return VPDT.properlyDominates(A, B);
759   });
760 
761   for (VPRecipeBase *SinkCandidate : WorkList) {
762     if (SinkCandidate == FOR)
763       continue;
764 
765     SinkCandidate->moveAfter(Previous);
766     Previous = SinkCandidate;
767   }
768   return true;
769 }
770 
771 /// Try to hoist \p Previous and its operands before all users of \p FOR.
772 static bool hoistPreviousBeforeFORUsers(VPFirstOrderRecurrencePHIRecipe *FOR,
773                                         VPRecipeBase *Previous,
774                                         VPDominatorTree &VPDT) {
775   if (Previous->mayHaveSideEffects() || Previous->mayReadFromMemory())
776     return false;
777 
778   // Collect recipes that need hoisting.
779   SmallVector<VPRecipeBase *> HoistCandidates;
780   SmallPtrSet<VPRecipeBase *, 8> Visited;
781   VPRecipeBase *HoistPoint = nullptr;
782   // Find the closest hoist point by looking at all users of FOR and selecting
783   // the recipe dominating all other users.
784   for (VPUser *U : FOR->users()) {
785     auto *R = cast<VPRecipeBase>(U);
786     if (!HoistPoint || VPDT.properlyDominates(R, HoistPoint))
787       HoistPoint = R;
788   }
789   assert(all_of(FOR->users(),
790                 [&VPDT, HoistPoint](VPUser *U) {
791                   auto *R = cast<VPRecipeBase>(U);
792                   return HoistPoint == R ||
793                          VPDT.properlyDominates(HoistPoint, R);
794                 }) &&
795          "HoistPoint must dominate all users of FOR");
796 
797   auto NeedsHoisting = [HoistPoint, &VPDT,
798                         &Visited](VPValue *HoistCandidateV) -> VPRecipeBase * {
799     VPRecipeBase *HoistCandidate = HoistCandidateV->getDefiningRecipe();
800     if (!HoistCandidate)
801       return nullptr;
802     VPRegionBlock *EnclosingLoopRegion =
803         HoistCandidate->getParent()->getEnclosingLoopRegion();
804     assert((!HoistCandidate->getParent()->getParent() ||
805             HoistCandidate->getParent()->getParent() == EnclosingLoopRegion) &&
806            "CFG in VPlan should still be flat, without replicate regions");
807     // Hoist candidate was already visited, no need to hoist.
808     if (!Visited.insert(HoistCandidate).second)
809       return nullptr;
810 
811     // Candidate is outside the loop region or is a header phi; it dominates FOR
812     // users without hoisting.
813     if (!EnclosingLoopRegion || isa<VPHeaderPHIRecipe>(HoistCandidate))
814       return nullptr;
815 
816     // If we reached a recipe that dominates HoistPoint, we don't need to
817     // hoist the recipe.
818     if (VPDT.properlyDominates(HoistCandidate, HoistPoint))
819       return nullptr;
820     return HoistCandidate;
821   };
822   auto CanHoist = [&](VPRecipeBase *HoistCandidate) {
823     // Avoid hoisting candidates with side-effects, as we do not yet analyze
824     // associated dependencies.
825     return !HoistCandidate->mayHaveSideEffects();
826   };
827 
828   if (!NeedsHoisting(Previous->getVPSingleValue()))
829     return true;
830 
831   // Recursively try to hoist Previous and its operands before all users of FOR.
832   HoistCandidates.push_back(Previous);
833 
834   for (unsigned I = 0; I != HoistCandidates.size(); ++I) {
835     VPRecipeBase *Current = HoistCandidates[I];
836     assert(Current->getNumDefinedValues() == 1 &&
837            "only recipes with a single defined value expected");
838     if (!CanHoist(Current))
839       return false;
840 
841     for (VPValue *Op : Current->operands()) {
842       // If we reach FOR, it means the original Previous depends on some other
843       // recurrence that in turn depends on FOR. If that is the case, we would
844       // also need to hoist recipes involving the other FOR, which may break
845       // dependencies.
846       if (Op == FOR)
847         return false;
848 
849       if (auto *R = NeedsHoisting(Op))
850         HoistCandidates.push_back(R);
851     }
852   }
853 
854   // Order recipes to hoist by dominance so earlier instructions are processed
855   // first.
856   sort(HoistCandidates, [&VPDT](const VPRecipeBase *A, const VPRecipeBase *B) {
857     return VPDT.properlyDominates(A, B);
858   });
859 
860   for (VPRecipeBase *HoistCandidate : HoistCandidates) {
861     HoistCandidate->moveBefore(*HoistPoint->getParent(),
862                                HoistPoint->getIterator());
863   }
864 
865   return true;
866 }
867 
868 bool VPlanTransforms::adjustFixedOrderRecurrences(VPlan &Plan,
869                                                   VPBuilder &LoopBuilder) {
870   VPDominatorTree VPDT;
871   VPDT.recalculate(Plan);
872 
873   SmallVector<VPFirstOrderRecurrencePHIRecipe *> RecurrencePhis;
874   for (VPRecipeBase &R :
875        Plan.getVectorLoopRegion()->getEntry()->getEntryBasicBlock()->phis())
876     if (auto *FOR = dyn_cast<VPFirstOrderRecurrencePHIRecipe>(&R))
877       RecurrencePhis.push_back(FOR);
878 
879   for (VPFirstOrderRecurrencePHIRecipe *FOR : RecurrencePhis) {
880     SmallPtrSet<VPFirstOrderRecurrencePHIRecipe *, 4> SeenPhis;
881     VPRecipeBase *Previous = FOR->getBackedgeValue()->getDefiningRecipe();
882     // Fixed-order recurrences do not contain cycles, so this loop is guaranteed
883     // to terminate.
884     while (auto *PrevPhi =
885                dyn_cast_or_null<VPFirstOrderRecurrencePHIRecipe>(Previous)) {
886       assert(PrevPhi->getParent() == FOR->getParent());
887       assert(SeenPhis.insert(PrevPhi).second);
888       Previous = PrevPhi->getBackedgeValue()->getDefiningRecipe();
889     }
890 
891     if (!sinkRecurrenceUsersAfterPrevious(FOR, Previous, VPDT) &&
892         !hoistPreviousBeforeFORUsers(FOR, Previous, VPDT))
893       return false;
894 
895     // Introduce a recipe to combine the incoming and previous values of a
896     // fixed-order recurrence.
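    // For example (illustrative):
    //
    //   %for        = FIRST-ORDER-RECURRENCE-PHI [ %init, ph ], [ %for.next ]
    //   ...
    //   %for.next   = ...
    //   %for.splice = first-order splice %for, %for.next
    //
    // with all original users of %for redirected to %for.splice.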
897     VPBasicBlock *InsertBlock = Previous->getParent();
898     if (isa<VPHeaderPHIRecipe>(Previous))
899       LoopBuilder.setInsertPoint(InsertBlock, InsertBlock->getFirstNonPhi());
900     else
901       LoopBuilder.setInsertPoint(InsertBlock,
902                                  std::next(Previous->getIterator()));
903 
904     auto *RecurSplice = cast<VPInstruction>(
905         LoopBuilder.createNaryOp(VPInstruction::FirstOrderRecurrenceSplice,
906                                  {FOR, FOR->getBackedgeValue()}));
907 
908     FOR->replaceAllUsesWith(RecurSplice);
909     // Set the first operand of RecurSplice to FOR again, after replacing
910     // all users.
911     RecurSplice->setOperand(0, FOR);
912   }
913   return true;
914 }
915 
916 static SmallVector<VPUser *> collectUsersRecursively(VPValue *V) {
917   SetVector<VPUser *> Users(V->user_begin(), V->user_end());
918   for (unsigned I = 0; I != Users.size(); ++I) {
919     VPRecipeBase *Cur = cast<VPRecipeBase>(Users[I]);
920     if (isa<VPHeaderPHIRecipe>(Cur))
921       continue;
922     for (VPValue *V : Cur->definedValues())
923       Users.insert(V->user_begin(), V->user_end());
924   }
925   return Users.takeVector();
926 }
927 
928 void VPlanTransforms::clearReductionWrapFlags(VPlan &Plan) {
929   for (VPRecipeBase &R :
930        Plan.getVectorLoopRegion()->getEntryBasicBlock()->phis()) {
931     auto *PhiR = dyn_cast<VPReductionPHIRecipe>(&R);
932     if (!PhiR)
933       continue;
934     const RecurrenceDescriptor &RdxDesc = PhiR->getRecurrenceDescriptor();
935     RecurKind RK = RdxDesc.getRecurrenceKind();
936     if (RK != RecurKind::Add && RK != RecurKind::Mul)
937       continue;
938 
939     for (VPUser *U : collectUsersRecursively(PhiR))
940       if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(U)) {
941         RecWithFlags->dropPoisonGeneratingFlags();
942       }
943   }
944 }
945 
946 /// Try to simplify recipe \p R.
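/// Examples of simplifications handled here (illustrative): blend recipes with
/// a single live incoming value are replaced by that value, redundant
/// trunc/ext chains are folded (e.g. trunc (zext %a to i64) to i32 becomes %a
/// if %a is already i32), and multiplications by 1 and double negations are
/// removed.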
947 static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
948   using namespace llvm::VPlanPatternMatch;
949 
950   if (auto *Blend = dyn_cast<VPBlendRecipe>(&R)) {
951     // Try to remove redundant blend recipes.
952     SmallPtrSet<VPValue *, 4> UniqueValues;
953     if (Blend->isNormalized() || !match(Blend->getMask(0), m_False()))
954       UniqueValues.insert(Blend->getIncomingValue(0));
955     for (unsigned I = 1; I != Blend->getNumIncomingValues(); ++I)
956       if (!match(Blend->getMask(I), m_False()))
957         UniqueValues.insert(Blend->getIncomingValue(I));
958 
959     if (UniqueValues.size() == 1) {
960       Blend->replaceAllUsesWith(*UniqueValues.begin());
961       Blend->eraseFromParent();
962       return;
963     }
964 
965     if (Blend->isNormalized())
966       return;
967 
968     // Normalize the blend so its first incoming value is used as the initial
969     // value with the others blended into it.
970 
971     unsigned StartIndex = 0;
972     for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
973       // If a value's mask is used only by the blend then it can be dead-coded.
974       // TODO: Find the most expensive mask that can be deadcoded, or a mask
975       // that's used by multiple blends where it can be removed from them all.
976       VPValue *Mask = Blend->getMask(I);
977       if (Mask->getNumUsers() == 1 && !match(Mask, m_False())) {
978         StartIndex = I;
979         break;
980       }
981     }
982 
983     SmallVector<VPValue *, 4> OperandsWithMask;
984     OperandsWithMask.push_back(Blend->getIncomingValue(StartIndex));
985 
986     for (unsigned I = 0; I != Blend->getNumIncomingValues(); ++I) {
987       if (I == StartIndex)
988         continue;
989       OperandsWithMask.push_back(Blend->getIncomingValue(I));
990       OperandsWithMask.push_back(Blend->getMask(I));
991     }
992 
993     auto *NewBlend = new VPBlendRecipe(
994         cast<PHINode>(Blend->getUnderlyingValue()), OperandsWithMask);
995     NewBlend->insertBefore(&R);
996 
997     VPValue *DeadMask = Blend->getMask(StartIndex);
998     Blend->replaceAllUsesWith(NewBlend);
999     Blend->eraseFromParent();
1000     recursivelyDeleteDeadRecipes(DeadMask);
1001     return;
1002   }
1003 
1004   VPValue *A;
1005   if (match(&R, m_Trunc(m_ZExtOrSExt(m_VPValue(A))))) {
1006     VPValue *Trunc = R.getVPSingleValue();
1007     Type *TruncTy = TypeInfo.inferScalarType(Trunc);
1008     Type *ATy = TypeInfo.inferScalarType(A);
1009     if (TruncTy == ATy) {
1010       Trunc->replaceAllUsesWith(A);
1011     } else {
1012       // Don't replace a scalarizing recipe with a widened cast.
1013       if (isa<VPReplicateRecipe>(&R))
1014         return;
1015       if (ATy->getScalarSizeInBits() < TruncTy->getScalarSizeInBits()) {
1016 
1017         unsigned ExtOpcode = match(R.getOperand(0), m_SExt(m_VPValue()))
1018                                  ? Instruction::SExt
1019                                  : Instruction::ZExt;
1020         auto *VPC =
1021             new VPWidenCastRecipe(Instruction::CastOps(ExtOpcode), A, TruncTy);
1022         if (auto *UnderlyingExt = R.getOperand(0)->getUnderlyingValue()) {
1023           // UnderlyingExt has distinct return type, used to retain legacy cost.
1024           VPC->setUnderlyingValue(UnderlyingExt);
1025         }
1026         VPC->insertBefore(&R);
1027         Trunc->replaceAllUsesWith(VPC);
1028       } else if (ATy->getScalarSizeInBits() > TruncTy->getScalarSizeInBits()) {
1029         auto *VPC = new VPWidenCastRecipe(Instruction::Trunc, A, TruncTy);
1030         VPC->insertBefore(&R);
1031         Trunc->replaceAllUsesWith(VPC);
1032       }
1033     }
1034 #ifndef NDEBUG
1035     // Verify that the cached type info for both A and its users is still
1036     // accurate by comparing it to freshly computed types.
1037     VPTypeAnalysis TypeInfo2(
1038         R.getParent()->getPlan()->getCanonicalIV()->getScalarType());
1039     assert(TypeInfo.inferScalarType(A) == TypeInfo2.inferScalarType(A));
1040     for (VPUser *U : A->users()) {
1041       auto *R = cast<VPRecipeBase>(U);
1042       for (VPValue *VPV : R->definedValues())
1043         assert(TypeInfo.inferScalarType(VPV) == TypeInfo2.inferScalarType(VPV));
1044     }
1045 #endif
1046   }
1047 
1048   // Simplify (X && Y) || (X && !Y) -> X.
1049   // TODO: Split up into simpler, modular combines: (X && Y) || (X && Z) into X
1050   // && (Y || Z) and (X || !X) into true. This requires queuing newly created
1051   // recipes to be visited during simplification.
1052   VPValue *X, *Y, *X1, *Y1;
1053   if (match(&R,
1054             m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
1055                          m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
1056       X == X1 && Y == Y1) {
1057     R.getVPSingleValue()->replaceAllUsesWith(X);
1058     R.eraseFromParent();
1059     return;
1060   }
1061 
1062   if (match(&R, m_c_Mul(m_VPValue(A), m_SpecificInt(1))))
1063     return R.getVPSingleValue()->replaceAllUsesWith(A);
1064 
1065   if (match(&R, m_Not(m_Not(m_VPValue(A)))))
1066     return R.getVPSingleValue()->replaceAllUsesWith(A);
1067 }
1068 
1069 /// Move loop-invariant recipes out of the vector loop region in \p Plan.
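/// For example (illustrative), a recipe such as
///
///   WIDEN %inv = mul %live.in.a, %live.in.b
///
/// whose operands are all defined outside the loop region is moved to the
/// vector preheader so it is computed only once.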
1070 static void licm(VPlan &Plan) {
1071   VPBasicBlock *Preheader = Plan.getVectorPreheader();
1072 
1073   // Return true if we do not know how to (mechanically) hoist a given recipe
1074   // out of a loop region. Does not address legality concerns such as aliasing
1075   // or speculation safety.
1076   auto CannotHoistRecipe = [](VPRecipeBase &R) {
1077     // Allocas cannot be hoisted.
1078     auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
1079     return RepR && RepR->getOpcode() == Instruction::Alloca;
1080   };
1081 
1082   // Hoist any loop invariant recipes from the vector loop region to the
1083   // preheader. Perform a shallow traversal of the vector loop region, to
1084   // exclude recipes in replicate regions.
1085   VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1086   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1087            vp_depth_first_shallow(LoopRegion->getEntry()))) {
1088     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
1089       if (CannotHoistRecipe(R))
1090         continue;
1091       // TODO: Relax checks in the future, e.g. we could also hoist reads, if
1092       // their memory location is not modified in the vector loop.
1093       if (R.mayHaveSideEffects() || R.mayReadFromMemory() || R.isPhi() ||
1094           any_of(R.operands(), [](VPValue *Op) {
1095             return !Op->isDefinedOutsideLoopRegions();
1096           }))
1097         continue;
1098       R.moveBefore(*Preheader, Preheader->end());
1099     }
1100   }
1101 }
1102 
1103 /// Try to simplify the recipes in \p Plan.
1104 static void simplifyRecipes(VPlan &Plan) {
1105   ReversePostOrderTraversal<VPBlockDeepTraversalWrapper<VPBlockBase *>> RPOT(
1106       Plan.getEntry());
1107   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1108   VPTypeAnalysis TypeInfo(CanonicalIVType);
1109   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(RPOT)) {
1110     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
1111       simplifyRecipe(R, TypeInfo);
1112     }
1113   }
1114 }
1115 
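// For example (illustrative): with MinBWs[%add] = 8 for a widened 32-bit
//
//   WIDEN %add = add %x, %y
//
// the operands are truncated to i8, the add executes in i8, and the result is
// zero-extended back to i32 for its existing users:
//
//   %x.t   = trunc %x to i8
//   %y.t   = trunc %y to i8
//   %add   = add %x.t, %y.t
//   %add.e = zext %add to i32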
1116 void VPlanTransforms::truncateToMinimalBitwidths(
1117     VPlan &Plan, const MapVector<Instruction *, uint64_t> &MinBWs) {
1118 #ifndef NDEBUG
1119   // Count the processed recipes and cross check the count later with MinBWs
1120   // size, to make sure all entries in MinBWs have been handled.
1121   unsigned NumProcessedRecipes = 0;
1122 #endif
1123   // Keep track of created truncates, so they can be re-used. Note that we
1124   // cannot use RAUW after creating a new truncate, as this could make
1125   // other uses have different types for their operands, making them invalidly
1126   // typed.
1127   DenseMap<VPValue *, VPWidenCastRecipe *> ProcessedTruncs;
1128   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1129   VPTypeAnalysis TypeInfo(CanonicalIVType);
1130   VPBasicBlock *PH = Plan.getVectorPreheader();
1131   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1132            vp_depth_first_deep(Plan.getVectorLoopRegion()))) {
1133     for (VPRecipeBase &R : make_early_inc_range(*VPBB)) {
1134       if (!isa<VPWidenRecipe, VPWidenCastRecipe, VPReplicateRecipe,
1135                VPWidenSelectRecipe, VPWidenLoadRecipe>(&R))
1136         continue;
1137 
1138       VPValue *ResultVPV = R.getVPSingleValue();
1139       auto *UI = cast_or_null<Instruction>(ResultVPV->getUnderlyingValue());
1140       unsigned NewResSizeInBits = MinBWs.lookup(UI);
1141       if (!NewResSizeInBits)
1142         continue;
1143 
1144 #ifndef NDEBUG
1145       NumProcessedRecipes++;
1146 #endif
1147       // If the value wasn't vectorized, we must maintain the original scalar
1148       // type. Skip those here, after incrementing NumProcessedRecipes. Also
1149       // skip casts which do not need to be handled explicitly here, as
1150       // redundant casts will be removed during recipe simplification.
1151       if (isa<VPReplicateRecipe, VPWidenCastRecipe>(&R)) {
1152 #ifndef NDEBUG
1153         // If any of the operands is a live-in and not used by VPWidenRecipe or
1154         // VPWidenSelectRecipe, but in MinBWs, make sure it is counted as
1155         // processed as well. As MinBWs is currently constructed, there is no
1156         // information about whether recipes are widened or replicated, and in
1157         // case they are replicated the operands are not truncated. Counting
1158         // them here ensures we do not miss any recipes in MinBWs.
1159         // TODO: Remove once the analysis is done on VPlan.
1160         for (VPValue *Op : R.operands()) {
1161           if (!Op->isLiveIn())
1162             continue;
1163           auto *UV = dyn_cast_or_null<Instruction>(Op->getUnderlyingValue());
1164           if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) &&
1165               none_of(Op->users(),
1166                       IsaPred<VPWidenRecipe, VPWidenSelectRecipe>)) {
1167             // Add an entry to ProcessedTruncs to avoid counting the same
1168             // operand multiple times.
1169             ProcessedTruncs[Op] = nullptr;
1170             NumProcessedRecipes += 1;
1171           }
1172         }
1173 #endif
1174         continue;
1175       }
1176 
1177       Type *OldResTy = TypeInfo.inferScalarType(ResultVPV);
1178       unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits();
1179       assert(OldResTy->isIntegerTy() && "only integer types supported");
1180       (void)OldResSizeInBits;
1181 
1182       LLVMContext &Ctx = CanonicalIVType->getContext();
1183       auto *NewResTy = IntegerType::get(Ctx, NewResSizeInBits);
1184 
1185       // Any wrapping introduced by shrinking this operation shouldn't be
1186       // considered undefined behavior. So, we can't unconditionally copy
1187       // arithmetic wrapping flags to VPW.
1188       if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
1189         VPW->dropPoisonGeneratingFlags();
1190 
1191       using namespace llvm::VPlanPatternMatch;
1192       if (OldResSizeInBits != NewResSizeInBits &&
1193           !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
1194         // Extend result to original width.
1195         auto *Ext =
1196             new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
1197         Ext->insertAfter(&R);
1198         ResultVPV->replaceAllUsesWith(Ext);
1199         Ext->setOperand(0, ResultVPV);
1200         assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
1201       } else {
1202         assert(
1203             match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
1204             "Only ICmps should not need extending the result.");
1205       }
1206 
1207       assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
1208       if (isa<VPWidenLoadRecipe>(&R))
1209         continue;
1210 
1211       // Shrink operands by introducing truncates as needed.
1212       unsigned StartIdx = isa<VPWidenSelectRecipe>(&R) ? 1 : 0;
1213       for (unsigned Idx = StartIdx; Idx != R.getNumOperands(); ++Idx) {
1214         auto *Op = R.getOperand(Idx);
1215         unsigned OpSizeInBits =
1216             TypeInfo.inferScalarType(Op)->getScalarSizeInBits();
1217         if (OpSizeInBits == NewResSizeInBits)
1218           continue;
1219         assert(OpSizeInBits > NewResSizeInBits && "nothing to truncate");
1220         auto [ProcessedIter, IterIsEmpty] =
1221             ProcessedTruncs.insert({Op, nullptr});
1222         VPWidenCastRecipe *NewOp =
1223             IterIsEmpty
1224                 ? new VPWidenCastRecipe(Instruction::Trunc, Op, NewResTy)
1225                 : ProcessedIter->second;
1226         R.setOperand(Idx, NewOp);
1227         if (!IterIsEmpty)
1228           continue;
1229         ProcessedIter->second = NewOp;
1230         if (!Op->isLiveIn()) {
1231           NewOp->insertBefore(&R);
1232         } else {
1233           PH->appendRecipe(NewOp);
1234 #ifndef NDEBUG
1235           auto *OpInst = dyn_cast<Instruction>(Op->getLiveInIRValue());
1236           bool IsContained = MinBWs.contains(OpInst);
1237           NumProcessedRecipes += IsContained;
1238 #endif
1239         }
1240       }
1241 
1242     }
1243   }
1244 
1245   assert(MinBWs.size() == NumProcessedRecipes &&
1246          "some entries in MinBWs haven't been processed");
1247 }
1248 
1249 void VPlanTransforms::optimize(VPlan &Plan) {
1250   removeRedundantCanonicalIVs(Plan);
1251   removeRedundantInductionCasts(Plan);
1252 
1253   simplifyRecipes(Plan);
1254   legalizeAndOptimizeInductions(Plan);
1255   removeDeadRecipes(Plan);
1256 
1257   createAndOptimizeReplicateRegions(Plan);
1258 
1259   removeRedundantExpandSCEVRecipes(Plan);
1260   mergeBlocksIntoPredecessors(Plan);
1261   licm(Plan);
1262 }
1263 
1264 // Add a VPActiveLaneMaskPHIRecipe and related recipes to \p Plan and replace
1265 // the loop terminator with a branch-on-cond recipe with the negated
1266 // active-lane-mask as operand. Note that this turns the loop into an
1267 // uncountable one. Only the existing terminator is replaced; all other existing
1268 // recipes/users remain unchanged, except for poison-generating flags being
1269 // dropped from the canonical IV increment. Return the created
1270 // VPActiveLaneMaskPHIRecipe.
1271 //
1272 // The function uses the following definitions:
1273 //
1274 //  %TripCount = DataWithControlFlowWithoutRuntimeCheck ?
1275 //    calculate-trip-count-minus-VF (original TC) : original TC
1276 //  %IncrementValue = DataWithControlFlowWithoutRuntimeCheck ?
1277 //     CanonicalIVPhi : CanonicalIVIncrement
1278 //  %StartV is the canonical induction start value.
1279 //
1280 // The function adds the following recipes:
1281 //
1282 // vector.ph:
1283 //   %TripCount = calculate-trip-count-minus-VF (original TC)
1284 //       [if DataWithControlFlowWithoutRuntimeCheck]
1285 //   %EntryInc = canonical-iv-increment-for-part %StartV
1286 //   %EntryALM = active-lane-mask %EntryInc, %TripCount
1287 //
1288 // vector.body:
1289 //   ...
1290 //   %P = active-lane-mask-phi [ %EntryALM, %vector.ph ], [ %ALM, %vector.body ]
1291 //   ...
1292 //   %InLoopInc = canonical-iv-increment-for-part %IncrementValue
1293 //   %ALM = active-lane-mask %InLoopInc, %TripCount
1294 //   %Negated = Not %ALM
1295 //   branch-on-cond %Negated
1296 //
1297 static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch(
1298     VPlan &Plan, bool DataAndControlFlowWithoutRuntimeCheck) {
1299   VPRegionBlock *TopRegion = Plan.getVectorLoopRegion();
1300   VPBasicBlock *EB = TopRegion->getExitingBasicBlock();
1301   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1302   VPValue *StartV = CanonicalIVPHI->getStartValue();
1303 
1304   auto *CanonicalIVIncrement =
1305       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1306   // TODO: Check if dropping the flags is needed if
1307   // !DataAndControlFlowWithoutRuntimeCheck.
1308   CanonicalIVIncrement->dropPoisonGeneratingFlags();
1309   DebugLoc DL = CanonicalIVIncrement->getDebugLoc();
1310   // We can't use StartV directly in the ActiveLaneMask VPInstruction, since
1311   // we have to take unrolling into account. Each part needs to start at
1312   //   Part * VF
1313   auto *VecPreheader = Plan.getVectorPreheader();
1314   VPBuilder Builder(VecPreheader);
1315 
1316   // Create the ActiveLaneMask instruction using the correct start values.
1317   VPValue *TC = Plan.getTripCount();
1318 
1319   VPValue *TripCount, *IncrementValue;
1320   if (!DataAndControlFlowWithoutRuntimeCheck) {
1321     // When the loop is guarded by a runtime overflow check for the loop
1322     // induction variable increment by VF, we can increment the value before
1323     // the get.active.lane.mask intrinsic and use the unmodified trip count.
1324     IncrementValue = CanonicalIVIncrement;
1325     TripCount = TC;
1326   } else {
1327     // When avoiding a runtime check, the active.lane.mask inside the loop
1328     // uses a modified trip count and the induction variable increment is
1329     // done after the active.lane.mask intrinsic is called.
1330     IncrementValue = CanonicalIVPHI;
1331     TripCount = Builder.createNaryOp(VPInstruction::CalculateTripCountMinusVF,
1332                                      {TC}, DL);
1333   }
1334   auto *EntryIncrement = Builder.createOverflowingOp(
1335       VPInstruction::CanonicalIVIncrementForPart, {StartV}, {false, false}, DL,
1336       "index.part.next");
1337 
1338   // Create the active lane mask instruction in the VPlan preheader.
1339   auto *EntryALM =
1340       Builder.createNaryOp(VPInstruction::ActiveLaneMask, {EntryIncrement, TC},
1341                            DL, "active.lane.mask.entry");
1342 
1343   // Now create the ActiveLaneMaskPhi recipe in the main loop using the
1344   // preheader ActiveLaneMask instruction.
1345   auto *LaneMaskPhi = new VPActiveLaneMaskPHIRecipe(EntryALM, DebugLoc());
1346   LaneMaskPhi->insertAfter(CanonicalIVPHI);
1347 
1348   // Create the active lane mask for the next iteration of the loop before the
1349   // original terminator.
1350   VPRecipeBase *OriginalTerminator = EB->getTerminator();
1351   Builder.setInsertPoint(OriginalTerminator);
1352   auto *InLoopIncrement =
1353       Builder.createOverflowingOp(VPInstruction::CanonicalIVIncrementForPart,
1354                                   {IncrementValue}, {false, false}, DL);
1355   auto *ALM = Builder.createNaryOp(VPInstruction::ActiveLaneMask,
1356                                    {InLoopIncrement, TripCount}, DL,
1357                                    "active.lane.mask.next");
1358   LaneMaskPhi->addOperand(ALM);
1359 
1360   // Replace the original terminator with BranchOnCond. We have to invert the
1361   // mask here because a true condition means jumping to the exit block.
1362   auto *NotMask = Builder.createNot(ALM, DL);
1363   Builder.createNaryOp(VPInstruction::BranchOnCond, {NotMask}, DL);
1364   OriginalTerminator->eraseFromParent();
1365   return LaneMaskPhi;
1366 }
1367 
1368 /// Collect all VPValues representing a header mask through the (ICMP_ULE,
1369 /// WideCanonicalIV, backedge-taken-count) pattern.
1370 /// TODO: Introduce explicit recipe for header-mask instead of searching
1371 /// for the header-mask pattern manually.
1372 static SmallVector<VPValue *> collectAllHeaderMasks(VPlan &Plan) {
1373   SmallVector<VPValue *> WideCanonicalIVs;
1374   auto *FoundWidenCanonicalIVUser =
1375       find_if(Plan.getCanonicalIV()->users(),
1376               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1377   assert(count_if(Plan.getCanonicalIV()->users(),
1378                   [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }) <=
1379              1 &&
1380          "Must have at most one VPWidenCanonicalIVRecipe");
1381   if (FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end()) {
1382     auto *WideCanonicalIV =
1383         cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1384     WideCanonicalIVs.push_back(WideCanonicalIV);
1385   }
1386 
1387   // Also include VPWidenIntOrFpInductionRecipes that represent a widened
1388   // version of the canonical induction.
1389   VPBasicBlock *HeaderVPBB = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1390   for (VPRecipeBase &Phi : HeaderVPBB->phis()) {
1391     auto *WidenOriginalIV = dyn_cast<VPWidenIntOrFpInductionRecipe>(&Phi);
1392     if (WidenOriginalIV && WidenOriginalIV->isCanonical())
1393       WideCanonicalIVs.push_back(WidenOriginalIV);
1394   }
1395 
1396   // Walk users of wide canonical IVs and collect all compares of the form
1397   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count).
1398   SmallVector<VPValue *> HeaderMasks;
1399   for (auto *Wide : WideCanonicalIVs) {
1400     for (VPUser *U : SmallVector<VPUser *>(Wide->users())) {
1401       auto *HeaderMask = dyn_cast<VPInstruction>(U);
1402       if (!HeaderMask || !vputils::isHeaderMask(HeaderMask, Plan))
1403         continue;
1404 
1405       assert(HeaderMask->getOperand(0) == Wide &&
1406              "WidenCanonicalIV must be the first operand of the compare");
1407       HeaderMasks.push_back(HeaderMask);
1408     }
1409   }
1410   return HeaderMasks;
1411 }
1412 
1413 void VPlanTransforms::addActiveLaneMask(
1414     VPlan &Plan, bool UseActiveLaneMaskForControlFlow,
1415     bool DataAndControlFlowWithoutRuntimeCheck) {
1416   assert((!DataAndControlFlowWithoutRuntimeCheck ||
1417           UseActiveLaneMaskForControlFlow) &&
1418          "DataAndControlFlowWithoutRuntimeCheck implies "
1419          "UseActiveLaneMaskForControlFlow");
1420 
1421   auto *FoundWidenCanonicalIVUser =
1422       find_if(Plan.getCanonicalIV()->users(),
1423               [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); });
1424   assert(FoundWidenCanonicalIVUser != Plan.getCanonicalIV()->users().end() &&
1425          "Must have widened canonical IV when tail folding!");
1426   auto *WideCanonicalIV =
1427       cast<VPWidenCanonicalIVRecipe>(*FoundWidenCanonicalIVUser);
1428   VPSingleDefRecipe *LaneMask;
1429   if (UseActiveLaneMaskForControlFlow) {
1430     LaneMask = addVPLaneMaskPhiAndUpdateExitBranch(
1431         Plan, DataAndControlFlowWithoutRuntimeCheck);
1432   } else {
1433     VPBuilder B = VPBuilder::getToInsertAfter(WideCanonicalIV);
1434     LaneMask = B.createNaryOp(VPInstruction::ActiveLaneMask,
1435                               {WideCanonicalIV, Plan.getTripCount()}, nullptr,
1436                               "active.lane.mask");
1437   }
1438 
1439   // Walk users of WideCanonicalIV and replace all compares of the form
1440   // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an
1441   // active-lane-mask.
1442   for (VPValue *HeaderMask : collectAllHeaderMasks(Plan))
1443     HeaderMask->replaceAllUsesWith(LaneMask);
1444 }
1445 
1446 /// Replace recipes with their EVL variants.
1447 static void transformRecipestoEVLRecipes(VPlan &Plan, VPValue &EVL) {
1448   using namespace llvm::VPlanPatternMatch;
1449   Type *CanonicalIVType = Plan.getCanonicalIV()->getScalarType();
1450   VPTypeAnalysis TypeInfo(CanonicalIVType);
1451   LLVMContext &Ctx = CanonicalIVType->getContext();
1452   SmallVector<VPValue *> HeaderMasks = collectAllHeaderMasks(Plan);
1453 
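       // With EVL tail folding the number of lanes processed per iteration is the
       // EVL rather than the VF, so rewire VPReverseVectorPointerRecipes to take
       // EVL as their lane-count operand.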
1454   for (VPUser *U : Plan.getVF().users()) {
1455     if (auto *R = dyn_cast<VPReverseVectorPointerRecipe>(U))
1456       R->setOperand(1, &EVL);
1457   }
1458 
1459   SmallVector<VPRecipeBase *> ToErase;
1460 
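       // Rewrite recipes that use a header mask into their EVL-based variants. The
       // EVL already limits the active lanes, so the header mask itself can be
       // dropped from the new recipe; any other mask is preserved.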
1461   for (VPValue *HeaderMask : HeaderMasks) {
1462     for (VPUser *U : collectUsersRecursively(HeaderMask)) {
1463       auto *CurRecipe = cast<VPRecipeBase>(U);
1464       auto GetNewMask = [&](VPValue *OrigMask) -> VPValue * {
1465         assert(OrigMask && "Unmasked recipe when folding tail");
1466         return HeaderMask == OrigMask ? nullptr : OrigMask;
1467       };
1468 
1469       VPRecipeBase *NewRecipe =
1470           TypeSwitch<VPRecipeBase *, VPRecipeBase *>(CurRecipe)
1471               .Case<VPWidenLoadRecipe>([&](VPWidenLoadRecipe *L) {
1472                 VPValue *NewMask = GetNewMask(L->getMask());
1473                 return new VPWidenLoadEVLRecipe(*L, EVL, NewMask);
1474               })
1475               .Case<VPWidenStoreRecipe>([&](VPWidenStoreRecipe *S) {
1476                 VPValue *NewMask = GetNewMask(S->getMask());
1477                 return new VPWidenStoreEVLRecipe(*S, EVL, NewMask);
1478               })
1479               .Case<VPWidenRecipe>([&](VPWidenRecipe *W) -> VPRecipeBase * {
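                     // Only unary and binary widened operations have a VP
                     // counterpart here; leave other opcodes untouched.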
1480                 unsigned Opcode = W->getOpcode();
1481                 if (!Instruction::isBinaryOp(Opcode) &&
1482                     !Instruction::isUnaryOp(Opcode))
1483                   return nullptr;
1484                 return new VPWidenEVLRecipe(*W, EVL);
1485               })
1486               .Case<VPReductionRecipe>([&](VPReductionRecipe *Red) {
1487                 VPValue *NewMask = GetNewMask(Red->getCondOp());
1488                 return new VPReductionEVLRecipe(*Red, EVL, NewMask);
1489               })
1490               .Case<VPWidenIntrinsicRecipe>(
1491                   [&](VPWidenIntrinsicRecipe *CInst) -> VPRecipeBase * {
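                         // Replace the widened intrinsic call with its VP
                         // counterpart, if one exists, appending an all-true
                         // mask and the EVL as trailing operands.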
1492                     auto *CI = cast<CallInst>(CInst->getUnderlyingInstr());
1493                     Intrinsic::ID VPID = VPIntrinsic::getForIntrinsic(
1494                         CI->getCalledFunction()->getIntrinsicID());
1495                     if (VPID == Intrinsic::not_intrinsic)
1496                       return nullptr;
1497 
1498                     SmallVector<VPValue *> Ops(CInst->operands());
1499                     assert(VPIntrinsic::getMaskParamPos(VPID) &&
1500                            VPIntrinsic::getVectorLengthParamPos(VPID) &&
1501                            "Expected VP intrinsic");
1502                     VPValue *Mask = Plan.getOrAddLiveIn(ConstantInt::getTrue(
1503                         IntegerType::getInt1Ty(CI->getContext())));
1504                     Ops.push_back(Mask);
1505                     Ops.push_back(&EVL);
1506                     return new VPWidenIntrinsicRecipe(
1507                         *CI, VPID, Ops, TypeInfo.inferScalarType(CInst),
1508                         CInst->getDebugLoc());
1509                   })
1510               .Case<VPWidenCastRecipe>(
1511                   [&](VPWidenCastRecipe *CastR) -> VPRecipeBase * {
1512                     Intrinsic::ID VPID =
1513                         VPIntrinsic::getForOpcode(CastR->getOpcode());
1514                     assert(VPID != Intrinsic::not_intrinsic &&
1515                            "Expected vp.cast intrinsic");
1516 
1517                     SmallVector<VPValue *> Ops(CastR->operands());
1518                     assert(VPIntrinsic::getMaskParamPos(VPID) &&
1519                            VPIntrinsic::getVectorLengthParamPos(VPID) &&
1520                            "Expected VP intrinsic");
1521                     VPValue *Mask =
1522                         Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));
1523                     Ops.push_back(Mask);
1524                     Ops.push_back(&EVL);
1525                     return new VPWidenIntrinsicRecipe(
1526                         VPID, Ops, TypeInfo.inferScalarType(CastR),
1527                         CastR->getDebugLoc());
1528                   })
1529               .Case<VPWidenSelectRecipe>([&](VPWidenSelectRecipe *Sel) {
1530                 SmallVector<VPValue *> Ops(Sel->operands());
1531                 Ops.push_back(&EVL);
1532                 return new VPWidenIntrinsicRecipe(Intrinsic::vp_select, Ops,
1533                                                   TypeInfo.inferScalarType(Sel),
1534                                                   Sel->getDebugLoc());
1535               })
1536               .Case<VPInstruction>([&](VPInstruction *VPI) -> VPRecipeBase * {
1537                 VPValue *LHS, *RHS;
1538                 // Transform select with a header mask condition
1539                 //   select(header_mask, LHS, RHS)
1540                 // into vector predication merge.
1541                 //   vp.merge(all-true, LHS, RHS, EVL)
1542                 if (!match(VPI, m_Select(m_Specific(HeaderMask), m_VPValue(LHS),
1543                                          m_VPValue(RHS))))
1544                   return nullptr;
1545                 // Use all true as the condition because this transformation is
1546                 // limited to selects whose condition is a header mask.
1547                 VPValue *AllTrue =
1548                     Plan.getOrAddLiveIn(ConstantInt::getTrue(Ctx));
1549                 return new VPWidenIntrinsicRecipe(
1550                     Intrinsic::vp_merge, {AllTrue, LHS, RHS, &EVL},
1551                     TypeInfo.inferScalarType(LHS), VPI->getDebugLoc());
1552               })
1553               .Default([&](VPRecipeBase *R) { return nullptr; });
1554 
1555       if (!NewRecipe)
1556         continue;
1557 
1558       [[maybe_unused]] unsigned NumDefVal = NewRecipe->getNumDefinedValues();
1559       assert(NumDefVal == CurRecipe->getNumDefinedValues() &&
1560              "New recipe must define the same number of values as the "
1561              "original.");
1562       assert(
1563           NumDefVal <= 1 &&
1564           "Only supports recipes with a single definition or without users.");
1565       NewRecipe->insertBefore(CurRecipe);
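           // If the new recipe defines a value, reroute all users of the original
           // recipe's result to it.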
1566       if (isa<VPSingleDefRecipe, VPWidenLoadEVLRecipe>(NewRecipe)) {
1567         VPValue *CurVPV = CurRecipe->getVPSingleValue();
1568         CurVPV->replaceAllUsesWith(NewRecipe->getVPSingleValue());
1569       }
1570       // Defer erasing recipes till the end so that we don't invalidate the
1571       // VPTypeAnalysis cache.
1572       ToErase.push_back(CurRecipe);
1573     }
1574   }
1575 
1576   for (VPRecipeBase *R : reverse(ToErase)) {
1577     SmallVector<VPValue *> PossiblyDead(R->operands());
1578     R->eraseFromParent();
1579     for (VPValue *Op : PossiblyDead)
1580       recursivelyDeleteDeadRecipes(Op);
1581   }
1582 }
1583 
1584 /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and replace
1585 /// all uses of VPCanonicalIVPHIRecipe, except for the canonical IV increment,
1586 /// with a VPEVLBasedIVPHIRecipe. After this transformation, the
1587 /// VPCanonicalIVPHIRecipe is used only for counting loop iterations.
1588 ///
1589 /// The function uses the following definitions:
1590 ///  %StartV is the canonical induction start value.
1591 ///
1592 /// The function adds the following recipes:
1593 ///
1594 /// vector.ph:
1595 /// ...
1596 ///
1597 /// vector.body:
1598 /// ...
1599 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1600 ///                                               [ %NextEVLIV, %vector.body ]
1601 /// %AVL = sub original TC, %EVLPhi
1602 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %AVL
1603 /// ...
1604 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1605 /// ...
1606 ///
1607 /// If MaxSafeElements is provided, the function adds the following recipes:
1608 /// vector.ph:
1609 /// ...
1610 ///
1611 /// vector.body:
1612 /// ...
1613 /// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ],
1614 ///                                               [ %NextEVLIV, %vector.body ]
1615 /// %AVL = sub original TC, %EVLPhi
1616 /// %cmp = cmp ult %AVL, MaxSafeElements
1617 /// %SAFE_AVL = select %cmp, %AVL, MaxSafeElements
1618 /// %VPEVL = EXPLICIT-VECTOR-LENGTH %SAFE_AVL
1619 /// ...
1620 /// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi
1621 /// ...
1622 ///
1623 bool VPlanTransforms::tryAddExplicitVectorLength(
1624     VPlan &Plan, const std::optional<unsigned> &MaxSafeElements) {
1625   VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock();
1626   // The transform updates all users of inductions to work based on EVL, instead
1627   // of the VF directly. At the moment, widened inductions cannot be updated, so
1628   // bail out if the plan contains any.
1629   bool ContainsWidenInductions = any_of(
1630       Header->phis(),
1631       IsaPred<VPWidenIntOrFpInductionRecipe, VPWidenPointerInductionRecipe>);
1632   if (ContainsWidenInductions)
1633     return false;
1634 
1635   auto *CanonicalIVPHI = Plan.getCanonicalIV();
1636   VPValue *StartV = CanonicalIVPHI->getStartValue();
1637 
1638   // Create the ExplicitVectorLengthPhi recipe in the main loop.
1639   auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc());
1640   EVLPhi->insertAfter(CanonicalIVPHI);
1641   VPBuilder Builder(Header, Header->getFirstNonPhi());
1642   // Compute original TC - IV as the AVL (application vector length).
1643   VPValue *AVL = Builder.createNaryOp(
1644       Instruction::Sub, {Plan.getTripCount(), EVLPhi}, DebugLoc(), "avl");
1645   if (MaxSafeElements) {
1646     // Clamp the AVL so it never exceeds MaxSafeElements.
1647     VPValue *AVLSafe = Plan.getOrAddLiveIn(
1648         ConstantInt::get(CanonicalIVPHI->getScalarType(), *MaxSafeElements));
1649     VPValue *Cmp = Builder.createICmp(ICmpInst::ICMP_ULT, AVL, AVLSafe);
1650     AVL = Builder.createSelect(Cmp, AVL, AVLSafe, DebugLoc(), "safe_avl");
1651   }
1652   auto *VPEVL = Builder.createNaryOp(VPInstruction::ExplicitVectorLength, AVL,
1653                                      DebugLoc());
1654 
1655   auto *CanonicalIVIncrement =
1656       cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue());
1657   VPSingleDefRecipe *OpVPEVL = VPEVL;
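       // The explicit vector length is computed as an i32; cast it to the type of
       // the canonical IV if the widths differ before using it as the increment.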
1658   if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits();
1659       IVSize != 32) {
1660     OpVPEVL = new VPScalarCastRecipe(IVSize < 32 ? Instruction::Trunc
1661                                                  : Instruction::ZExt,
1662                                      OpVPEVL, CanonicalIVPHI->getScalarType());
1663     OpVPEVL->insertBefore(CanonicalIVIncrement);
1664   }
1665   auto *NextEVLIV =
1666       new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi},
1667                         {CanonicalIVIncrement->hasNoUnsignedWrap(),
1668                          CanonicalIVIncrement->hasNoSignedWrap()},
1669                         CanonicalIVIncrement->getDebugLoc(), "index.evl.next");
1670   NextEVLIV->insertBefore(CanonicalIVIncrement);
1671   EVLPhi->addOperand(NextEVLIV);
1672 
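       // Rewrite recipes using the header mask into EVL-based variants where
       // possible.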
1673   transformRecipestoEVLRecipes(Plan, *VPEVL);
1674 
1675   // Replace all uses of VPCanonicalIVPHIRecipe by
1676   // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
1677   CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
1678   CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
1679   // TODO: support unroll factor > 1.
1680   Plan.setUF(1);
1681   return true;
1682 }
1683 
1684 void VPlanTransforms::dropPoisonGeneratingRecipes(
1685     VPlan &Plan, function_ref<bool(BasicBlock *)> BlockNeedsPredication) {
1686   // Collect recipes in the backward slice of `Root` that may generate a poison
1687   // value that is used after vectorization.
1688   SmallPtrSet<VPRecipeBase *, 16> Visited;
1689   auto CollectPoisonGeneratingInstrsInBackwardSlice([&](VPRecipeBase *Root) {
1690     SmallVector<VPRecipeBase *, 16> Worklist;
1691     Worklist.push_back(Root);
1692 
1693     // Traverse the backward slice of Root through its use-def chain.
1694     while (!Worklist.empty()) {
1695       VPRecipeBase *CurRec = Worklist.pop_back_val();
1696 
1697       if (!Visited.insert(CurRec).second)
1698         continue;
1699 
1700       // Prune search if we find another recipe generating a widen memory
1701       // instruction. Widen memory instructions involved in address computation
1702       // will lead to gather/scatter instructions, which don't need to be
1703       // handled.
1704       if (isa<VPWidenMemoryRecipe, VPInterleaveRecipe, VPScalarIVStepsRecipe,
1705               VPHeaderPHIRecipe>(CurRec))
1706         continue;
1707 
1708       // This recipe contributes to the address computation of a widen
1709       // load/store. If the underlying instruction has poison-generating flags,
1710       // drop them directly.
1711       if (auto *RecWithFlags = dyn_cast<VPRecipeWithIRFlags>(CurRec)) {
1712         VPValue *A, *B;
1713         using namespace llvm::VPlanPatternMatch;
1714         // Dropping disjoint from an OR may yield incorrect results, as some
1715         // analysis may have converted it to an Add implicitly (e.g. SCEV used
1716         // for dependence analysis). Instead, replace it with an equivalent Add.
1717         // This is possible as all users of the disjoint OR only access lanes
1718         // where the operands are disjoint or poison otherwise.
1719         if (match(RecWithFlags, m_BinaryOr(m_VPValue(A), m_VPValue(B))) &&
1720             RecWithFlags->isDisjoint()) {
1721           VPBuilder Builder(RecWithFlags);
1722           VPInstruction *New = Builder.createOverflowingOp(
1723               Instruction::Add, {A, B}, {false, false},
1724               RecWithFlags->getDebugLoc());
1725           New->setUnderlyingValue(RecWithFlags->getUnderlyingValue());
1726           RecWithFlags->replaceAllUsesWith(New);
1727           RecWithFlags->eraseFromParent();
1728           CurRec = New;
1729         } else
1730           RecWithFlags->dropPoisonGeneratingFlags();
1731       } else {
1732         Instruction *Instr = dyn_cast_or_null<Instruction>(
1733             CurRec->getVPSingleValue()->getUnderlyingValue());
1734         (void)Instr;
1735         assert((!Instr || !Instr->hasPoisonGeneratingFlags()) &&
1736                "found instruction with poison generating flags not covered by "
1737                "VPRecipeWithIRFlags");
1738       }
1739 
1740       // Add new definitions to the worklist.
1741       for (VPValue *Operand : CurRec->operands())
1742         if (VPRecipeBase *OpDef = Operand->getDefiningRecipe())
1743           Worklist.push_back(OpDef);
1744     }
1745   });
1746 
1747   // Traverse all the recipes in the VPlan and collect the poison-generating
1748   // recipes in the backward slice starting at the address of a
1749   // VPWidenMemoryRecipe or VPInterleaveRecipe.
1750   auto Iter = vp_depth_first_deep(Plan.getEntry());
1751   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(Iter)) {
1752     for (VPRecipeBase &Recipe : *VPBB) {
1753       if (auto *WidenRec = dyn_cast<VPWidenMemoryRecipe>(&Recipe)) {
1754         Instruction &UnderlyingInstr = WidenRec->getIngredient();
1755         VPRecipeBase *AddrDef = WidenRec->getAddr()->getDefiningRecipe();
1756         if (AddrDef && WidenRec->isConsecutive() &&
1757             BlockNeedsPredication(UnderlyingInstr.getParent()))
1758           CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1759       } else if (auto *InterleaveRec = dyn_cast<VPInterleaveRecipe>(&Recipe)) {
1760         VPRecipeBase *AddrDef = InterleaveRec->getAddr()->getDefiningRecipe();
1761         if (AddrDef) {
1762           // Check if any member of the interleave group needs predication.
1763           const InterleaveGroup<Instruction> *InterGroup =
1764               InterleaveRec->getInterleaveGroup();
1765           bool NeedPredication = false;
1766           for (int I = 0, NumMembers = InterGroup->getNumMembers();
1767                I < NumMembers; ++I) {
1768             Instruction *Member = InterGroup->getMember(I);
1769             if (Member)
1770               NeedPredication |= BlockNeedsPredication(Member->getParent());
1771           }
1772 
1773           if (NeedPredication)
1774             CollectPoisonGeneratingInstrsInBackwardSlice(AddrDef);
1775         }
1776       }
1777     }
1778   }
1779 }
1780 
1781 void VPlanTransforms::createInterleaveGroups(
1782     VPlan &Plan,
1783     const SmallPtrSetImpl<const InterleaveGroup<Instruction> *>
1784         &InterleaveGroups,
1785     VPRecipeBuilder &RecipeBuilder, bool ScalarEpilogueAllowed) {
1786   if (InterleaveGroups.empty())
1787     return;
1788 
1789   // Interleave memory: for each Interleave Group we marked earlier as relevant
1790   // for this VPlan, replace the Recipes widening its memory instructions with a
1791   // single VPInterleaveRecipe at its insertion point.
1792   VPDominatorTree VPDT;
1793   VPDT.recalculate(Plan);
1794   for (const auto *IG : InterleaveGroups) {
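         // Collect the values stored by the group's store members, in member
         // order; they become operands of the interleave recipe.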
1795     SmallVector<VPValue *, 4> StoredValues;
1796     for (unsigned i = 0; i < IG->getFactor(); ++i)
1797       if (auto *SI = dyn_cast_or_null<StoreInst>(IG->getMember(i))) {
1798         auto *StoreR = cast<VPWidenStoreRecipe>(RecipeBuilder.getRecipe(SI));
1799         StoredValues.push_back(StoreR->getStoredValue());
1800       }
1801 
1802     bool NeedsMaskForGaps =
1803         IG->requiresScalarEpilogue() && !ScalarEpilogueAllowed;
1804 
1805     Instruction *IRInsertPos = IG->getInsertPos();
1806     auto *InsertPos =
1807         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IRInsertPos));
1808 
1809     // Get or create the start address for the interleave group.
1810     auto *Start =
1811         cast<VPWidenMemoryRecipe>(RecipeBuilder.getRecipe(IG->getMember(0)));
1812     VPValue *Addr = Start->getAddr();
1813     VPRecipeBase *AddrDef = Addr->getDefiningRecipe();
1814     if (AddrDef && !VPDT.properlyDominates(AddrDef, InsertPos)) {
1815       // TODO: Hoist Addr's defining recipe (and any operands as needed) to
1816       // InsertPos or sink loads above zero members to join it.
1817       bool InBounds = false;
1818       if (auto *Gep = dyn_cast<GetElementPtrInst>(
1819               getLoadStorePointerOperand(IRInsertPos)->stripPointerCasts()))
1820         InBounds = Gep->isInBounds();
1821 
1822       // We cannot re-use the address of member zero because it does not
1823       // dominate the insert position. Instead, use the address of the insert
1824       // position and create a PtrAdd adjusting it to the address of member
1825       // zero.
1826       assert(IG->getIndex(IRInsertPos) != 0 &&
1827              "index of insert position shouldn't be zero");
1828       auto &DL = IRInsertPos->getDataLayout();
1829       APInt Offset(32,
1830                    DL.getTypeAllocSize(getLoadStoreType(IRInsertPos)) *
1831                        IG->getIndex(IRInsertPos),
1832                    /*IsSigned=*/true);
1833       VPValue *OffsetVPV = Plan.getOrAddLiveIn(
1834           ConstantInt::get(IRInsertPos->getParent()->getContext(), -Offset));
1835       VPBuilder B(InsertPos);
1836       Addr = InBounds ? B.createInBoundsPtrAdd(InsertPos->getAddr(), OffsetVPV)
1837                       : B.createPtrAdd(InsertPos->getAddr(), OffsetVPV);
1838     }
1839     auto *VPIG = new VPInterleaveRecipe(IG, Addr, StoredValues,
1840                                         InsertPos->getMask(), NeedsMaskForGaps);
1841     VPIG->insertBefore(InsertPos);
1842 
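         // Wire up the interleave recipe's results: each load member's uses are
         // redirected to the corresponding interleaved value, and the original
         // member recipes are erased.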
1843     unsigned J = 0;
1844     for (unsigned i = 0; i < IG->getFactor(); ++i)
1845       if (Instruction *Member = IG->getMember(i)) {
1846         VPRecipeBase *MemberR = RecipeBuilder.getRecipe(Member);
1847         if (!Member->getType()->isVoidTy()) {
1848           VPValue *OriginalV = MemberR->getVPSingleValue();
1849           OriginalV->replaceAllUsesWith(VPIG->getVPValue(J));
1850           J++;
1851         }
1852         MemberR->eraseFromParent();
1853       }
1854   }
1855 }
1856 
1857 void VPlanTransforms::convertToConcreteRecipes(VPlan &Plan) {
1858   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
1859            vp_depth_first_deep(Plan.getEntry()))) {
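         // Lower canonical and EVL-based IV header phis to plain scalar phis now
         // that their specialized recipes are no longer needed.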
1860     for (VPRecipeBase &R : make_early_inc_range(VPBB->phis())) {
1861       if (!isa<VPCanonicalIVPHIRecipe, VPEVLBasedIVPHIRecipe>(&R))
1862         continue;
1863       auto *PhiR = cast<VPHeaderPHIRecipe>(&R);
1864       StringRef Name =
1865           isa<VPCanonicalIVPHIRecipe>(PhiR) ? "index" : "evl.based.iv";
1866       auto *ScalarR =
1867           new VPScalarPHIRecipe(PhiR->getStartValue(), PhiR->getBackedgeValue(),
1868                                 PhiR->getDebugLoc(), Name);
1869       ScalarR->insertBefore(PhiR);
1870       PhiR->replaceAllUsesWith(ScalarR);
1871       PhiR->eraseFromParent();
1872     }
1873   }
1874 }
1875 
1876 void VPlanTransforms::handleUncountableEarlyExit(
1877     VPlan &Plan, ScalarEvolution &SE, Loop *OrigLoop,
1878     BasicBlock *UncountableExitingBlock, VPRecipeBuilder &RecipeBuilder) {
1879   VPRegionBlock *LoopRegion = Plan.getVectorLoopRegion();
1880   auto *LatchVPBB = cast<VPBasicBlock>(LoopRegion->getExiting());
1881   VPBuilder Builder(LatchVPBB->getTerminator());
1882   auto *MiddleVPBB = Plan.getMiddleBlock();
1883   VPValue *IsEarlyExitTaken = nullptr;
1884 
1885   // Process the uncountable exiting block. Update IsEarlyExitTaken, which
1886   // tracks if the uncountable early exit has been taken. Also split the middle
1887   // block and have it conditionally branch to the early exit block if
1888   // EarlyExitTaken.
1889   auto *EarlyExitingBranch =
1890       cast<BranchInst>(UncountableExitingBlock->getTerminator());
1891   BasicBlock *TrueSucc = EarlyExitingBranch->getSuccessor(0);
1892   BasicBlock *FalseSucc = EarlyExitingBranch->getSuccessor(1);
1893 
1894   // The early exit block may or may not be the same as the "countable" exit
1895   // block. Create a new VPIRBB for the early exit block in case it is distinct
1896   // from the countable exit block.
1897   // TODO: Introduce both exit blocks during VPlan skeleton construction.
1898   VPIRBasicBlock *VPEarlyExitBlock;
1899   if (OrigLoop->getUniqueExitBlock()) {
1900     VPEarlyExitBlock = cast<VPIRBasicBlock>(MiddleVPBB->getSuccessors()[0]);
1901   } else {
1902     VPEarlyExitBlock = VPIRBasicBlock::fromBasicBlock(
1903         !OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1904   }
1905 
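       // The block-in mask of the in-loop successor holds for lanes that do not
       // take the early exit; negate it and reduce it with AnyOf to determine
       // whether any lane exits early.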
1906   VPValue *EarlyExitNotTakenCond = RecipeBuilder.getBlockInMask(
1907       OrigLoop->contains(TrueSucc) ? TrueSucc : FalseSucc);
1908   auto *EarlyExitTakenCond = Builder.createNot(EarlyExitNotTakenCond);
1909   IsEarlyExitTaken =
1910       Builder.createNaryOp(VPInstruction::AnyOf, {EarlyExitTakenCond});
1911 
1912   VPBasicBlock *NewMiddle = new VPBasicBlock("middle.split");
1913   VPBlockUtils::insertOnEdge(LoopRegion, MiddleVPBB, NewMiddle);
1914   VPBlockUtils::connectBlocks(NewMiddle, VPEarlyExitBlock);
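       // Make the early exit block the first successor, so it is taken when the
       // branch-on-cond created below is true.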
1915   NewMiddle->swapSuccessors();
1916 
1917   VPBuilder MiddleBuilder(NewMiddle);
1918   MiddleBuilder.createNaryOp(VPInstruction::BranchOnCond, {IsEarlyExitTaken});
1919 
1920   // Replace the condition controlling the non-early exit from the vector loop
1921   // with one exiting if either the original condition of the vector latch is
1922   // true or the early exit has been taken.
1923   auto *LatchExitingBranch = cast<VPInstruction>(LatchVPBB->getTerminator());
1924   assert(LatchExitingBranch->getOpcode() == VPInstruction::BranchOnCount &&
1925          "Unexpected terminator");
1926   auto *IsLatchExitTaken =
1927       Builder.createICmp(CmpInst::ICMP_EQ, LatchExitingBranch->getOperand(0),
1928                          LatchExitingBranch->getOperand(1));
1929   auto *AnyExitTaken = Builder.createNaryOp(
1930       Instruction::Or, {IsEarlyExitTaken, IsLatchExitTaken});
1931   Builder.createNaryOp(VPInstruction::BranchOnCond, AnyExitTaken);
1932   LatchExitingBranch->eraseFromParent();
1933 }
1934