xref: /llvm-project/llvm/lib/Target/ARM/MVETailPredication.cpp (revision f39f92c1f610fcdfad74730a3e3df881e32a28c2)
1 //===- MVETailPredication.cpp - MVE Tail Predication ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Armv8.1m introduced MVE, M-Profile Vector Extension, and low-overhead
11 /// branches to help accelerate DSP applications. These two extensions,
12 /// combined with a new form of predication called tail-predication, can be used
13 /// to provide implicit vector predication within a low-overhead loop.
14 /// This is implicit because the predicate of active/inactive lanes is
15 /// calculated by hardware, and thus does not need to be explicitly passed
16 /// to vector instructions. The instructions responsible for this are the
17 /// DLSTP and WLSTP instructions, which set up a tail-predicated loop and
18 /// the total number of data elements processed by the loop. The loop-end
19 /// LETP instruction is responsible for decrementing and setting the remaining
20 /// elements to be processed and generating the mask of active lanes.
21 ///
22 /// The HardwareLoops pass inserts intrinsics identifying loops that the
23 /// backend will attempt to convert into a low-overhead loop. The vectorizer is
24 /// responsible for generating a vectorized loop in which the lanes are
25 /// predicated upon the iteration counter. This pass looks at these predicated
26 /// vector loops, that are targets for low-overhead loops, and prepares it for
27 /// code generation. Once the vectorizer has produced a masked loop, there's a
28 /// couple of final forms:
29 /// - A tail-predicated loop, with implicit predication.
30 /// - A loop containing multiple VCPT instructions, predicating multiple VPT
31 ///   blocks of instructions operating on different vector types.
32 ///
33 /// This pass:
34 /// 1) Checks if the predicates of the masked load/store instructions are
35 ///    generated by intrinsic @llvm.get.active.lane.mask(). This intrinsic
36 ///    consumes the scalar loop tripcount as its second argument, which we
37 ///    extract to set up the number of elements processed by the loop.
38 /// 2) Intrinsic @llvm.get.active.lane.mask() is then replaced by the MVE target
39 ///    specific VCTP intrinsic to represent the effect of tail predication.
40 ///    This will be picked up by the ARM Low-overhead loop pass, which performs
41 ///    the final transformation to a DLSTP or WLSTP tail-predicated loop.
42 
43 #include "ARM.h"
44 #include "ARMSubtarget.h"
45 #include "ARMTargetTransformInfo.h"
46 #include "llvm/Analysis/LoopInfo.h"
47 #include "llvm/Analysis/LoopPass.h"
48 #include "llvm/Analysis/ScalarEvolution.h"
49 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/TargetTransformInfo.h"
52 #include "llvm/CodeGen/TargetPassConfig.h"
53 #include "llvm/IR/IRBuilder.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/IntrinsicsARM.h"
56 #include "llvm/IR/PatternMatch.h"
57 #include "llvm/InitializePasses.h"
58 #include "llvm/Support/Debug.h"
59 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
60 #include "llvm/Transforms/Utils/LoopUtils.h"
61 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
62 
63 using namespace llvm;
64 
#define DEBUG_TYPE "mve-tail-predication"
#define DESC "Transform predicated vector loops to use MVE tail predication"

// Command-line knob selecting how aggressively tail-predication is applied.
// The "force" variants skip the overflow safety checks in IsSafeActiveMask
// and may be unsafe. Intentionally not 'static': presumably referenced from
// other ARM backend files — TODO confirm against the extern declaration.
cl::opt<TailPredication::Mode> EnableTailPredication(
   "tail-predication", cl::desc("MVE tail-predication pass options"),
   cl::init(TailPredication::Disabled),
   cl::values(clEnumValN(TailPredication::Disabled, "disabled",
                         "Don't tail-predicate loops"),
              clEnumValN(TailPredication::EnabledNoReductions,
                         "enabled-no-reductions",
                         "Enable tail-predication, but not for reduction loops"),
              clEnumValN(TailPredication::Enabled,
                         "enabled",
                         "Enable tail-predication, including reduction loops"),
              clEnumValN(TailPredication::ForceEnabledNoReductions,
                         "force-enabled-no-reductions",
                         "Enable tail-predication, but not for reduction loops, "
                         "and force this which might be unsafe"),
              clEnumValN(TailPredication::ForceEnabled,
                         "force-enabled",
                         "Enable tail-predication, including reduction loops, "
                         "and force this which might be unsafe")));
87 
88 
namespace {

/// Loop pass that rewrites @llvm.get.active.lane.mask based predication into
/// the ARM MVE VCTP intrinsic, preparing vectorized loops for the
/// ARMLowOverheadLoops pass to form DLSTP/WLSTP tail-predicated loops.
class MVETailPredication : public LoopPass {
  // Masked load/store (and gather/scatter) intrinsics found in the loop by
  // IsPredicatedVectorLoop().
  SmallVector<IntrinsicInst*, 4> MaskedInsts;
  // The loop currently being processed and the analyses it needs; set up at
  // the start of runOnLoop().
  Loop *L = nullptr;
  ScalarEvolution *SE = nullptr;
  TargetTransformInfo *TTI = nullptr;
  const ARMSubtarget *ST = nullptr;

public:
  static char ID;

  MVETailPredication() : LoopPass(ID) { }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool runOnLoop(Loop *L, LPPassManager&) override;

private:
  /// Perform the relevant checks on the loop and convert if possible.
  bool TryConvert(Value *TripCount);

  /// Return whether this is a vectorized loop, that contains masked
  /// load/stores.
  bool IsPredicatedVectorLoop();

  /// Perform several checks on the arguments of @llvm.get.active.lane.mask
  /// intrinsic. E.g., check that the loop induction variable and the element
  /// count are of the form we expect, and also perform overflow checks for
  /// the new expressions that are created.
  bool IsSafeActiveMask(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                        FixedVectorType *VecTy);

  /// Insert the intrinsic to represent the effect of tail predication.
  void InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                           FixedVectorType *VecTy);

  /// Rematerialize the iteration count in exit blocks, which enables
  /// ARMLowOverheadLoops to better optimise away loop update statements inside
  /// hardware-loops.
  /// NOTE(review): declared here but no definition is visible in this file —
  /// confirm it is still needed.
  void RematerializeIterCount();
};

} // end namespace
140 
141 static bool IsDecrement(Instruction &I) {
142   auto *Call = dyn_cast<IntrinsicInst>(&I);
143   if (!Call)
144     return false;
145 
146   Intrinsic::ID ID = Call->getIntrinsicID();
147   return ID == Intrinsic::loop_decrement_reg;
148 }
149 
150 static bool IsMasked(Instruction *I) {
151   auto *Call = dyn_cast<IntrinsicInst>(I);
152   if (!Call)
153     return false;
154 
155   Intrinsic::ID ID = Call->getIntrinsicID();
156   return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load ||
157          isGatherScatter(Call);
158 }
159 
bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
  // Bail out if the pass manager skips this loop or the pass is disabled on
  // the command line.
  if (skipLoop(L) || !EnableTailPredication)
    return false;

  MaskedInsts.clear();
  Function &F = *L->getHeader()->getParent();
  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<TargetMachine>();
  ST = &TM.getSubtarget<ARMSubtarget>(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  this->L = L;

  // The MVE and LOB extensions are combined to enable tail-predication, but
  // there's nothing preventing us from generating VCTP instructions for v8.1m.
  if (!ST->hasMVEIntegerOps() || !ST->hasV8_1MMainlineOps()) {
    LLVM_DEBUG(dbgs() << "ARM TP: Not a v8.1m.main+mve target.\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  // Scan a block for the hardware-loop intrinsic that sets the iteration
  // count; returns null if the block does not contain one.
  auto FindLoopIterations = [](BasicBlock *BB) -> IntrinsicInst* {
    for (auto &I : *BB) {
      auto *Call = dyn_cast<IntrinsicInst>(&I);
      if (!Call)
        continue;

      Intrinsic::ID ID = Call->getIntrinsicID();
      if (ID == Intrinsic::set_loop_iterations ||
          ID == Intrinsic::test_set_loop_iterations)
        return cast<IntrinsicInst>(&I);
    }
    return nullptr;
  };

  // Look for the hardware loop intrinsic that sets the iteration count.
  IntrinsicInst *Setup = FindLoopIterations(Preheader);

  // The test.set iteration could live in the pre-preheader.
  if (!Setup) {
    if (!Preheader->getSinglePredecessor())
      return false;
    Setup = FindLoopIterations(Preheader->getSinglePredecessor());
    if (!Setup)
      return false;
  }

  // Search for the hardware loop intrinsic that decrements the loop counter.
  // Note that 'break' only exits the inner loop, so if several blocks were to
  // contain a decrement the last one found would win — presumably a hardware
  // loop only ever contains one.
  IntrinsicInst *Decrement = nullptr;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (IsDecrement(I)) {
        Decrement = cast<IntrinsicInst>(&I);
        break;
      }
    }
  }

  if (!Decrement)
    return false;

  LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n"
             << *Decrement << "\n");

  // Argument 0 of (test.)set.loop.iterations is the trip count.
  if (!TryConvert(Setup->getArgOperand(0))) {
    LLVM_DEBUG(dbgs() << "ARM TP: Can't tail-predicate this loop.\n");
    return false;
  }

  return true;
}
234 
235 static FixedVectorType *getVectorType(IntrinsicInst *I) {
236   unsigned ID = I->getIntrinsicID();
237   FixedVectorType *VecTy;
238   if (ID == Intrinsic::masked_load || isGather(I)) {
239     if (ID == Intrinsic::arm_mve_vldr_gather_base_wb ||
240         ID == Intrinsic::arm_mve_vldr_gather_base_wb_predicated)
241       // then the type is a StructType
242       VecTy = dyn_cast<FixedVectorType>(I->getType()->getContainedType(0));
243     else
244       VecTy = dyn_cast<FixedVectorType>(I->getType());
245   } else if (ID == Intrinsic::masked_store) {
246     VecTy = dyn_cast<FixedVectorType>(I->getOperand(0)->getType());
247   } else {
248     VecTy = dyn_cast<FixedVectorType>(I->getOperand(2)->getType());
249   }
250   assert(VecTy && "No scalable vectors expected here");
251   return VecTy;
252 }
253 
bool MVETailPredication::IsPredicatedVectorLoop() {
  // Check that the loop contains at least one masked load/store intrinsic.
  // We only support 'normal' vector instructions - other than masked
  // load/stores.
  bool ActiveLaneMask = false;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      auto *Int = dyn_cast<IntrinsicInst>(&I);
      if (!Int)
        continue;

      switch (Int->getIntrinsicID()) {
      case Intrinsic::get_active_lane_mask:
        // Record that the lane-mask intrinsic is present; checked after the
        // whole loop has been scanned.
        ActiveLaneMask = true;
        continue;
      case Intrinsic::sadd_sat:
      case Intrinsic::uadd_sat:
      case Intrinsic::ssub_sat:
      case Intrinsic::usub_sat:
      case Intrinsic::experimental_vector_reduce_add:
        // Saturating arithmetic and add-reduction are accepted as-is.
        continue;
      case Intrinsic::fma:
      case Intrinsic::trunc:
      case Intrinsic::rint:
      case Intrinsic::round:
      case Intrinsic::floor:
      case Intrinsic::ceil:
      case Intrinsic::fabs:
        // FP intrinsics are fine only with MVE float support; otherwise fall
        // through to the generic vector-operand rejection below.
        if (ST->hasMVEFloatOps())
          continue;
        break;
      default:
        break;
      }
      if (IsMasked(&I)) {
        auto *VecTy = getVectorType(Int);
        unsigned Lanes = VecTy->getNumElements();
        unsigned ElementWidth = VecTy->getScalarSizeInBits();
        // MVE vectors are 128-bit, but don't support 128 x i1.
        // TODO: Can we support vectors larger than 128-bits?
        unsigned MaxWidth = TTI->getRegisterBitWidth(true);
        if (Lanes * ElementWidth > MaxWidth || Lanes == MaxWidth)
          return false;
        MaskedInsts.push_back(cast<IntrinsicInst>(&I));
        continue;
      }

      // Any other intrinsic with a vector operand is not supported.
      for (const Use &U : Int->args()) {
        if (isa<VectorType>(U->getType()))
          return false;
      }
    }
  }

  if (!ActiveLaneMask) {
    LLVM_DEBUG(dbgs() << "ARM TP: No get.active.lane.mask intrinsic found.\n");
    return false;
  }
  return !MaskedInsts.empty();
}
314 
// Dead-code sweep run after the VCTP intrinsics have been inserted: starting
// from the instructions in MaybeDead (the replaced lane-mask calls),
// iteratively collect instructions that have become unused, erase them, and
// finally delete any dead phis left in the loop blocks.
// NOTE(review): the previous comment here described cloning a predicate into
// the exit block, which this function does not do — it only cleans up.
static void Cleanup(SetVector<Instruction*> &MaybeDead, Loop *L) {
  // Exit is only used for this early bail-out; the block itself is not
  // referenced afterwards.
  BasicBlock *Exit = L->getUniqueExitBlock();
  if (!Exit) {
    LLVM_DEBUG(dbgs() << "ARM TP: can't find loop exit block\n");
    return;
  }

  // Drop references and add operands to check for dead.
  SmallPtrSet<Instruction*, 4> Dead;
  while (!MaybeDead.empty()) {
    auto *I = MaybeDead.front();
    MaybeDead.remove(I);
    // Still has users: keep it. Because an instruction whose only users are
    // other (not-yet-erased) Dead members is skipped here, Dead never holds
    // a def-use pair, so the erase loop below is order-safe. Instructions
    // that only become dead after this sweep are left for later passes.
    if (I->hasNUsesOrMore(1))
      continue;

    // Operands may become dead once I is erased; queue them for a re-check.
    for (auto &U : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(U))
        MaybeDead.insert(OpI);

    Dead.insert(I);
  }

  for (auto *I : Dead) {
    LLVM_DEBUG(dbgs() << "ARM TP: removing dead insn: "; I->dump());
    I->eraseFromParent();
  }

  for (auto I : L->blocks())
    DeleteDeadPHIs(I);
}
352 
// The active lane intrinsic has this form:
//
//    @llvm.get.active.lane.mask(IV, TC)
//
// Here we perform checks that this intrinsic behaves as expected,
// which means:
//
// 1) Check that the TripCount (TC) belongs to this loop (originally).
// 2) The element count (TC) needs to be sufficiently large that the decrement
//    of element counter doesn't overflow, which means that we need to prove:
//        ceil(ElementCount / VectorWidth) >= TripCount
//    by rounding up ElementCount up:
//        ((ElementCount + (VectorWidth - 1)) / VectorWidth
//    and evaluate if expression isKnownNonNegative:
//        (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
// 3) The IV must be an induction phi with an increment equal to the
//    vector width.
bool MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy) {
  // The "force" modes skip the overflow analysis in step 2 below.
  bool ForceTailPredication =
    EnableTailPredication == TailPredication::ForceEnabledNoReductions ||
    EnableTailPredication == TailPredication::ForceEnabled;

  Value *ElemCount = ActiveLaneMask->getOperand(1);
  auto *EC= SE->getSCEV(ElemCount);
  auto *TC = SE->getSCEV(TripCount);
  int VectorWidth = VecTy->getNumElements();
  ConstantInt *ConstElemCount = nullptr;

  // 1) Smoke tests that the original scalar loop TripCount (TC) belongs to
  // this loop.  The scalar tripcount corresponds the number of elements
  // processed by the loop, so we will refer to that from this point on.
  if (!SE->isLoopInvariant(EC, L)) {
    LLVM_DEBUG(dbgs() << "ARM TP: element count must be loop invariant.\n");
    return false;
  }

  if ((ConstElemCount = dyn_cast<ConstantInt>(ElemCount))) {
    // NOTE(review): this ConstantInt *TC shadows the SCEV *TC above; the
    // SCEV TC is not used again in this branch.
    ConstantInt *TC = dyn_cast<ConstantInt>(TripCount);
    if (!TC) {
      LLVM_DEBUG(dbgs() << "ARM TP: Constant tripcount expected in "
                           "set.loop.iterations\n");
      return false;
    }

    // Calculate 2 tripcount values and check that they are consistent with
    // each other:
    // i) The number of loop iterations extracted from the set.loop.iterations
    //    intrinsic, multiplied by the vector width:
    uint64_t TC1 = TC->getZExtValue() * VectorWidth;

    // ii) TC1 has to be equal to TC + 1, with the + 1 to compensate for start
    //     counting from 0.
    uint64_t TC2 = ConstElemCount->getZExtValue() + 1;

    // If the tripcount values are inconsistent, we don't want to insert the
    // VCTP and trigger tail-predication; it's better to keep intrinsic
    // get.active.lane.mask and legalize this.
    if (TC1 != TC2) {
      LLVM_DEBUG(dbgs() << "ARM TP: inconsistent constant tripcount values: "
                 << TC1 << " from set.loop.iterations, and "
                 << TC2 << " from get.active.lane.mask\n");
      return false;
    }
  } else if (!ForceTailPredication) {
    // 2) We need to prove that the sub expression that we create in the
    // tail-predicated loop body, which calculates the remaining elements to be
    // processed, is non-negative, i.e. it doesn't overflow:
    //
    //   ((ElementCount + VectorWidth - 1) / VectorWidth) - TripCount >= 0
    //
    // This is true if:
    //
    //    TripCount == (ElementCount + VectorWidth - 1) / VectorWidth
    //
    // which what we will be using here.
    //
    auto *VW = SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth));
    // ElementCount + (VW-1):
    auto *ECPlusVWMinus1 = SE->getAddExpr(EC,
        SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));

    // Ceil = ElementCount + (VW-1) / VW
    auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1, VW);

    LLVM_DEBUG(
      dbgs() << "ARM TP: Analysing overflow behaviour for:\n";
      dbgs() << "ARM TP: - TripCount = "; TC->dump();
      dbgs() << "ARM TP: - ElemCount = "; EC->dump();
      dbgs() << "ARM TP: - VecWidth =  " << VectorWidth << "\n";
      dbgs() << "ARM TP: - (ElemCount+VW-1) / VW = "; Ceil->dump();
    );

    // As an example, almost all the tripcount expressions (produced by the
    // vectoriser) look like this:
    //
    //   TC = ((-4 + (4 * ((3 + %N) /u 4))<nuw>) /u 4)
    //
    // and "ElementCount + (VW-1) / VW":
    //
    //   Ceil = ((3 + %N) /u 4)
    //
    // Check for equality of TC and Ceil by calculating SCEV expression
    // TC - Ceil and test it for zero.
    //
    bool Zero = SE->getMinusSCEV(
                      SE->getBackedgeTakenCount(L),
                      SE->getUDivExpr(SE->getAddExpr(SE->getMulExpr(Ceil, VW),
                                                     SE->getNegativeSCEV(VW)),
                                      VW))
                    ->isZero();

    if (!Zero) {
      LLVM_DEBUG(dbgs() << "ARM TP: possible overflow in sub expression.\n");
      return false;
    }
  }

  // 3) Find out if IV is an induction phi. Note that we can't use Loop
  // helpers here to get the induction variable, because the hardware loop is
  // no longer in loopsimplify form, and also the hwloop intrinsic uses a
  // different counter. Using SCEV, we check that the induction is of the
  // form i = i + 4, where the increment must be equal to the VectorWidth.
  auto *IV = ActiveLaneMask->getOperand(0);
  auto *IVExpr = SE->getSCEV(IV);
  auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);

  if (!AddExpr) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction not an add expr: "; IVExpr->dump());
    return false;
  }
  // Check that this AddRec is associated with this loop.
  if (AddExpr->getLoop() != L) {
    LLVM_DEBUG(dbgs() << "ARM TP: phi not part of this loop\n");
    return false;
  }
  auto *Base = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
  if (!Base || !Base->isZero()) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction base is not 0\n");
    return false;
  }
  auto *Step = dyn_cast<SCEVConstant>(AddExpr->getOperand(1));
  if (!Step) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction step is not a constant: ";
               AddExpr->getOperand(1)->dump());
    return false;
  }
  auto StepValue = Step->getValue()->getSExtValue();
  if (VectorWidth == StepValue)
    return true;

  LLVM_DEBUG(dbgs() << "ARM TP: Step value " << StepValue << " doesn't match "
             "vector width " << VectorWidth << "\n");

  return false;
}
509 
510 void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
511     Value *TripCount, FixedVectorType *VecTy) {
512   IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
513   Module *M = L->getHeader()->getModule();
514   Type *Ty = IntegerType::get(M->getContext(), 32);
515   unsigned VectorWidth = VecTy->getNumElements();
516 
517   // Insert a phi to count the number of elements processed by the loop.
518   Builder.SetInsertPoint(L->getHeader()->getFirstNonPHI()  );
519   PHINode *Processed = Builder.CreatePHI(Ty, 2);
520   Processed->addIncoming(ActiveLaneMask->getOperand(1), L->getLoopPreheader());
521 
522   // Replace @llvm.get.active.mask() with the ARM specific VCTP intrinic, and
523   // thus represent the effect of tail predication.
524   Builder.SetInsertPoint(ActiveLaneMask);
525   ConstantInt *Factor = ConstantInt::get(cast<IntegerType>(Ty), VectorWidth);
526 
527   Intrinsic::ID VCTPID;
528   switch (VectorWidth) {
529   default:
530     llvm_unreachable("unexpected number of lanes");
531   case 4:  VCTPID = Intrinsic::arm_mve_vctp32; break;
532   case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
533   case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;
534 
535     // FIXME: vctp64 currently not supported because the predicate
536     // vector wants to be <2 x i1>, but v2i1 is not a legal MVE
537     // type, so problems happen at isel time.
538     // Intrinsic::arm_mve_vctp64 exists for ACLE intrinsics
539     // purposes, but takes a v4i1 instead of a v2i1.
540   }
541   Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
542   Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
543   ActiveLaneMask->replaceAllUsesWith(VCTPCall);
544 
545   // Add the incoming value to the new phi.
546   // TODO: This add likely already exists in the loop.
547   Value *Remaining = Builder.CreateSub(Processed, Factor);
548   Processed->addIncoming(Remaining, L->getLoopLatch());
549   LLVM_DEBUG(dbgs() << "ARM TP: Insert processed elements phi: "
550              << *Processed << "\n"
551              << "ARM TP: Inserted VCTP: " << *VCTPCall << "\n");
552 }
553 
bool MVETailPredication::TryConvert(Value *TripCount) {
  if (!IsPredicatedVectorLoop()) {
    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "ARM TP: Found predicated vector loop.\n");
  SetVector<Instruction*> Predicates;

  // Return the argument index of the predicate (mask) operand for the given
  // masked memory intrinsic.
  auto getPredicateOp = [](IntrinsicInst *I) {
    unsigned IntrinsicID = I->getIntrinsicID();
    if (IntrinsicID == Intrinsic::arm_mve_vldr_gather_offset_predicated ||
        IntrinsicID == Intrinsic::arm_mve_vstr_scatter_offset_predicated)
      return 5;
    return (IntrinsicID == Intrinsic::masked_load || isGather(I)) ? 2 : 3;
  };

  // Walk through the masked intrinsics and try to find whether the predicate
  // operand is generated by intrinsic @llvm.get.active.lane.mask().
  for (auto *I : MaskedInsts) {
    Value *PredOp = I->getArgOperand(getPredicateOp(I));
    auto *Predicate = dyn_cast<Instruction>(PredOp);
    // Skip non-instruction predicates and ones already handled.
    if (!Predicate || Predicates.count(Predicate))
      continue;

    auto *ActiveLaneMask = dyn_cast<IntrinsicInst>(Predicate);
    if (!ActiveLaneMask ||
        ActiveLaneMask->getIntrinsicID() != Intrinsic::get_active_lane_mask)
      continue;

    Predicates.insert(Predicate);
    LLVM_DEBUG(dbgs() << "ARM TP: Found active lane mask: "
                      << *ActiveLaneMask << "\n");

    auto *VecTy = getVectorType(I);
    if (!IsSafeActiveMask(ActiveLaneMask, TripCount, VecTy)) {
      LLVM_DEBUG(dbgs() << "ARM TP: Not safe to insert VCTP.\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "ARM TP: Safe to insert VCTP.\n");
    InsertVCTPIntrinsic(ActiveLaneMask, TripCount, VecTy);
  }

  // The replaced lane-mask calls (and anything that only fed them) are now
  // dead; remove them.
  Cleanup(Predicates, L);
  return true;
}
600 
// Factory used by the ARM target to add this pass to its codegen pipeline.
Pass *llvm::createMVETailPredicationPass() {
  return new MVETailPredication();
}

// Pass identification: the address of ID is what uniquely identifies the
// pass, not its value.
char MVETailPredication::ID = 0;

INITIALIZE_PASS_BEGIN(MVETailPredication, DEBUG_TYPE, DESC, false, false)
INITIALIZE_PASS_END(MVETailPredication, DEBUG_TYPE, DESC, false, false)
609