xref: /llvm-project/llvm/lib/Target/ARM/MVETailPredication.cpp (revision c18b753686dc98bf978760d19dc9cba1dba58a13)
1 //===- MVETailPredication.cpp - MVE Tail Predication ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Armv8.1m introduced MVE, M-Profile Vector Extension, and low-overhead
11 /// branches to help accelerate DSP applications. These two extensions,
12 /// combined with a new form of predication called tail-predication, can be used
13 /// to provide implicit vector predication within a low-overhead loop.
14 /// This is implicit because the predicate of active/inactive lanes is
15 /// calculated by hardware, and thus does not need to be explicitly passed
16 /// to vector instructions. The instructions responsible for this are the
/// DLSTP and WLSTP instructions, which set up a tail-predicated loop and the
/// total number of data elements processed by the loop. The loop-end
19 /// LETP instruction is responsible for decrementing and setting the remaining
20 /// elements to be processed and generating the mask of active lanes.
21 ///
22 /// The HardwareLoops pass inserts intrinsics identifying loops that the
23 /// backend will attempt to convert into a low-overhead loop. The vectorizer is
24 /// responsible for generating a vectorized loop in which the lanes are
25 /// predicated upon the iteration counter. This pass looks at these predicated
26 /// vector loops, that are targets for low-overhead loops, and prepares it for
27 /// code generation. Once the vectorizer has produced a masked loop, there's a
28 /// couple of final forms:
29 /// - A tail-predicated loop, with implicit predication.
30 /// - A loop containing multiple VCPT instructions, predicating multiple VPT
31 ///   blocks of instructions operating on different vector types.
32 ///
33 /// This pass:
/// 1) Checks if the predicates of the masked load/store instructions are
///    generated by intrinsic @llvm.get.active.lane.mask(). This intrinsic
///    consumes the Backedge Taken Count (BTC) of the scalar loop as its second
///    argument, which we extract to set up the number of elements processed
///    by the loop.
/// 2) Intrinsic @llvm.get.active.lane.mask() is then replaced by the MVE
///    target specific VCTP intrinsic to represent the effect of tail
///    predication.
40 ///    This will be picked up by the ARM Low-overhead loop pass, which performs
41 ///    the final transformation to a DLSTP or WLSTP tail-predicated loop.
42 
43 #include "ARM.h"
44 #include "ARMSubtarget.h"
45 #include "llvm/Analysis/LoopInfo.h"
46 #include "llvm/Analysis/LoopPass.h"
47 #include "llvm/Analysis/ScalarEvolution.h"
48 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
49 #include "llvm/Analysis/TargetLibraryInfo.h"
50 #include "llvm/Analysis/TargetTransformInfo.h"
51 #include "llvm/CodeGen/TargetPassConfig.h"
52 #include "llvm/IR/IRBuilder.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicsARM.h"
55 #include "llvm/IR/PatternMatch.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
59 #include "llvm/Transforms/Utils/LoopUtils.h"
60 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
61 
using namespace llvm;

#define DEBUG_TYPE "mve-tail-predication"
#define DESC "Transform predicated vector loops to use MVE tail predication"

// Testing/debugging aid: skip the overflow legality checks performed in
// IsSafeActiveMask and convert the loop anyway.
static cl::opt<bool>
ForceTailPredication("force-mve-tail-predication", cl::Hidden, cl::init(false),
                     cl::desc("Force MVE tail-predication even if it might be "
                              "unsafe (e.g. possible overflow in loop "
                              "counters)"));

// Tail-predication is off by default (cl::init(true) disables it); external
// linkage so other ARM backend components can consult the flag.
cl::opt<bool>
DisableTailPredication("disable-mve-tail-predication", cl::Hidden,
                       cl::init(true),
                       cl::desc("Disable MVE Tail Predication"));
namespace {

/// Legacy loop pass that looks for vector loops predicated with
/// @llvm.get.active.lane.mask and rewrites that mask into the ARM-specific
/// VCTP intrinsic, preparing the loop for the ARMLowOverheadLoops pass to
/// form a DLSTP/WLSTP tail-predicated hardware loop.
class MVETailPredication : public LoopPass {
  // Masked load/store intrinsics collected by IsPredicatedVectorLoop().
  SmallVector<IntrinsicInst*, 4> MaskedInsts;
  // The loop currently being processed, plus the analyses it needs.
  Loop *L = nullptr;
  LoopInfo *LI = nullptr;
  const DataLayout *DL;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE = nullptr;
  TargetTransformInfo *TTI = nullptr;
  TargetLibraryInfo *TLI = nullptr;
  // Set by TryConvert/Cleanup when a VCTP clone was placed in the exit block;
  // triggers RematerializeIterCount() after a successful conversion.
  bool ClonedVCTPInExitBlock = false;
  // The @llvm.get.active.lane.mask call under consideration, and the vector
  // type of the masked memory operation it predicates.
  IntrinsicInst *ActiveLaneMask = nullptr;
  FixedVectorType *VecTy = nullptr;

public:
  static char ID;

  MVETailPredication() : LoopPass(ID) { }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
  }

  bool runOnLoop(Loop *L, LPPassManager&) override;

private:
  /// Perform the relevant checks on the loop and convert if possible.
  bool TryConvert(Value *TripCount);

  /// Return whether this is a vectorized loop, that contains masked
  /// load/stores.
  bool IsPredicatedVectorLoop();

  /// Perform checks on the arguments of @llvm.get.active.lane.mask
  /// intrinsic: check if the first is a loop induction variable, and for
  /// the second check that no overflow can occur in the expression that
  /// uses this backedge-taken count.
  bool IsSafeActiveMask(Value *TripCount, FixedVectorType *VecTy);

  /// Insert the intrinsic to represent the effect of tail predication.
  void InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask, Value *TripCount,
                           FixedVectorType *VecTy,
                           DenseMap<Instruction *, Instruction *> &NewPredicates);

  /// Rematerialize the iteration count in exit blocks, which enables
  /// ARMLowOverheadLoops to better optimise away loop update statements inside
  /// hardware-loops.
  void RematerializeIterCount();

  /// If it is not safe to lower @llvm.get.active.lane.mask to a VCTP, it needs
  /// to be lowered to an icmp.
  void RevertActiveLaneMask();
};

} // end namespace
140 
141 static bool IsDecrement(Instruction &I) {
142   auto *Call = dyn_cast<IntrinsicInst>(&I);
143   if (!Call)
144     return false;
145 
146   Intrinsic::ID ID = Call->getIntrinsicID();
147   return ID == Intrinsic::loop_decrement_reg;
148 }
149 
150 static bool IsMasked(Instruction *I) {
151   auto *Call = dyn_cast<IntrinsicInst>(I);
152   if (!Call)
153     return false;
154 
155   Intrinsic::ID ID = Call->getIntrinsicID();
156   // TODO: Support gather/scatter expand/compress operations.
157   return ID == Intrinsic::masked_store || ID == Intrinsic::masked_load;
158 }
159 
160 void MVETailPredication::RematerializeIterCount() {
161   SmallVector<WeakTrackingVH, 16> DeadInsts;
162   SCEVExpander Rewriter(*SE, *DL, "mvetp");
163   ReplaceExitVal ReplaceExitValue = AlwaysRepl;
164 
165   formLCSSARecursively(*L, *DT, LI, SE);
166   rewriteLoopExitValues(L, LI, TLI, SE, TTI, Rewriter, DT, ReplaceExitValue,
167                         DeadInsts);
168 }
169 
// Lower @llvm.get.active.lane.mask(%index, %BTC) back to the equivalent
// vector icmp: (%index + <0,1,..,VW-1>) ule splat(%BTC). Used when
// IsSafeActiveMask decided a VCTP cannot be emitted.
void MVETailPredication::RevertActiveLaneMask() {
  if (!ActiveLaneMask)
    return;

  int VectorWidth = VecTy->getElementCount().Min;
  IRBuilder<> Builder(ActiveLaneMask->getParent()->getFirstNonPHI());

  // 1. Create the vector induction step. This %induction will be the LHS of
  // the icmp:
  //
  // %splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  // %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef, <4 x i32> 0
  // %induction = add <4 x i32> %splat, <i32 0, i32 1, i32 2, i32 3>
  //
  Value *Index = ActiveLaneMask->getOperand(0);
  Value *SplatIndex =
      Builder.CreateVectorSplat(VectorWidth, Index, "lane.mask");

  // Constant step vector <0, 1, ..., VectorWidth-1> added to the splat index.
  SmallVector<Constant *, 8> Indices;
  for (int i = 0; i < VectorWidth; ++i)
    Indices.push_back(ConstantInt::get(Index->getType(), i));

  Constant *CV = ConstantVector::get(Indices);
  Value *Induction = Builder.CreateAdd(SplatIndex, CV, "lane.mask.induction");

  LLVM_DEBUG(dbgs() << "ARM TP: New index: " << *SplatIndex << "\n";
             dbgs() << "ARM TP: New Induction: " << *Induction << "\n");

  // 2. In the Preheader, first look if the splat BTC already exists. Find this
  //    %splat, which will be the RHS of the icmp:
  //
  //    %TC.minus.1 = add i32 %N, -1
  //    %splatinsert = insertelement <4 x i32> undef, i32 %TC.minus.1, i32 0
  //    %splat = shufflevector <4 x i32> %splatinsert, <4 x i32> undef, <16 x i32> 0
  //
  auto *Preheader = L->getLoopPreheader();
  auto *BTC = ActiveLaneMask->getOperand(1);
  Value *SplatBTC = nullptr;

  if (auto *C = dyn_cast<ConstantInt>(BTC)) {
    // Constant BTC: just splat it in the preheader.
    Builder.SetInsertPoint(Preheader->getTerminator());
    SplatBTC = Builder.CreateVectorSplat(VectorWidth, C);
    LLVM_DEBUG(dbgs() << "ARM TCP: New splat BTC: " << *SplatBTC << "\n");
  } else {
    // Scan the preheader for an existing insertelement of BTC into lane 0
    // whose (first) user is the splat shufflevector we want to reuse.
    Instruction *InsertElem;
    for (auto &V : *Preheader) {
      InsertElem = dyn_cast<InsertElementInst>(&V);
      if (!InsertElem)
        continue;
      ConstantInt *CI = dyn_cast<ConstantInt>(InsertElem->getOperand(2));
      if (!CI)
        continue;
      if (InsertElem->getOperand(1) != BTC || CI->getSExtValue() != 0)
        continue;
      // NOTE(review): assumes the insertelement has at least one user —
      // users().begin() is dereferenced unconditionally.
      if ((SplatBTC = dyn_cast<ShuffleVectorInst>(*InsertElem->users().begin())))
         break;
    }
  }
  // Or create the splat BTC if it doesn't exist.
  if (!SplatBTC) {
    Builder.SetInsertPoint(Preheader->getTerminator());
    Value *Undef =
        UndefValue::get(FixedVectorType::get(BTC->getType(), VectorWidth));
    Value *Insert = Builder.CreateInsertElement(Undef,
        BTC, Builder.getInt32(0), "insert.btc");
    Value *Zero = ConstantInt::get(Insert->getType(), 0);
    SplatBTC = Builder.CreateShuffleVector (Insert, Undef, Zero, "splat.btc");
    LLVM_DEBUG(dbgs() << "ARM TCP: New splat BTC: " << *SplatBTC << "\n");
  }

  // 3. Emit the ULE compare (matching @get.active.lane.mask's unsigned
  // semantics), and replace the intrinsic with it.
  Builder.SetInsertPoint(ActiveLaneMask);
  Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_ULE, Induction, SplatBTC);
  LLVM_DEBUG(dbgs() << "ARM TP: New compare: " << *ICmp << "\n");
  ActiveLaneMask->replaceAllUsesWith(ICmp);
  ActiveLaneMask->eraseFromParent();
}
246 
// Pass entry point: locate the hardware-loop intrinsics around \p L and, if
// present, attempt the tail-predication conversion via TryConvert.
bool MVETailPredication::runOnLoop(Loop *L, LPPassManager&) {
  if (skipLoop(L) || DisableTailPredication)
    return false;

  // Reset per-loop state and fetch the analyses.
  MaskedInsts.clear();
  Function &F = *L->getHeader()->getParent();
  auto &TPC = getAnalysis<TargetPassConfig>();
  auto &TM = TPC.getTM<TargetMachine>();
  auto *ST = &TM.getSubtarget<ARMSubtarget>(F);
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  // TLI may legitimately be null; rewriteLoopExitValues tolerates that.
  TLI = TLIP ? &TLIP->getTLI(*L->getHeader()->getParent()) : nullptr;
  DL = &L->getHeader()->getModule()->getDataLayout();
  this->L = L;
  ActiveLaneMask = nullptr;

  // The MVE and LOB extensions are combined to enable tail-predication, but
  // there's nothing preventing us from generating VCTP instructions for v8.1m.
  if (!ST->hasMVEIntegerOps() || !ST->hasV8_1MMainlineOps()) {
    LLVM_DEBUG(dbgs() << "ARM TP: Not a v8.1m.main+mve target.\n");
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader)
    return false;

  // Scans a block for the @llvm.(test.)set.loop.iterations call that the
  // HardwareLoops pass inserted.
  auto FindLoopIterations = [](BasicBlock *BB) -> IntrinsicInst* {
    for (auto &I : *BB) {
      auto *Call = dyn_cast<IntrinsicInst>(&I);
      if (!Call)
        continue;

      Intrinsic::ID ID = Call->getIntrinsicID();
      if (ID == Intrinsic::set_loop_iterations ||
          ID == Intrinsic::test_set_loop_iterations)
        return cast<IntrinsicInst>(&I);
    }
    return nullptr;
  };

  // Look for the hardware loop intrinsic that sets the iteration count.
  IntrinsicInst *Setup = FindLoopIterations(Preheader);

  // The test.set iteration could live in the pre-preheader.
  if (!Setup) {
    if (!Preheader->getSinglePredecessor())
      return false;
    Setup = FindLoopIterations(Preheader->getSinglePredecessor());
    if (!Setup)
      return false;
  }

  // Search for the hardware loop intrinic that decrements the loop counter.
  IntrinsicInst *Decrement = nullptr;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (IsDecrement(I)) {
        Decrement = cast<IntrinsicInst>(&I);
        break;
      }
    }
  }

  if (!Decrement)
    return false;

  ClonedVCTPInExitBlock = false;
  LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n"
             << *Decrement << "\n");

  // Setup's first argument is the trip count computed by HardwareLoops.
  if (TryConvert(Setup->getArgOperand(0))) {
    if (ClonedVCTPInExitBlock)
      RematerializeIterCount();
    return true;
  } else
    // Conversion failed: lower any @llvm.get.active.lane.mask back to an icmp.
    RevertActiveLaneMask();

  LLVM_DEBUG(dbgs() << "ARM TP: Can't tail-predicate this loop.\n");
  return false;
}
331 
332 static FixedVectorType *getVectorType(IntrinsicInst *I) {
333   unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ? 0 : 1;
334   auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
335   auto *VecTy = cast<FixedVectorType>(PtrTy->getElementType());
336   assert(VecTy && "No scalable vectors expected here");
337   return VecTy;
338 }
339 
// Returns true if the loop contains at least one masked load/store that fits
// in an MVE register, and no other intrinsic taking vector arguments (except
// fma). Side effect: fills MaskedInsts with the masked load/store calls.
bool MVETailPredication::IsPredicatedVectorLoop() {
  // Check that the loop contains at least one masked load/store intrinsic.
  // We only support 'normal' vector instructions - other than masked
  // load/stores.
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      if (IsMasked(&I)) {
        FixedVectorType *VecTy = getVectorType(cast<IntrinsicInst>(&I));
        unsigned Lanes = VecTy->getNumElements();
        unsigned ElementWidth = VecTy->getScalarSizeInBits();
        // MVE vectors are 128-bit, but don't support 128 x i1.
        // TODO: Can we support vectors larger than 128-bits?
        unsigned MaxWidth = TTI->getRegisterBitWidth(true);
        // Lanes == MaxWidth rejects the 128 x i1 case mentioned above.
        if (Lanes * ElementWidth > MaxWidth || Lanes == MaxWidth)
          return false;
        MaskedInsts.push_back(cast<IntrinsicInst>(&I));
      } else if (auto *Int = dyn_cast<IntrinsicInst>(&I)) {
        // fma is fine; any other intrinsic with a vector operand makes us
        // bail out, since we can't reason about its predication.
        if (Int->getIntrinsicID() == Intrinsic::fma)
          continue;
        for (auto &U : Int->args()) {
          if (isa<VectorType>(U->getType()))
            return false;
        }
      }
    }
  }

  return !MaskedInsts.empty();
}
369 
// Look through the exit block to see whether there's a duplicate predicate
// instruction. This can happen when we need to perform a select on values
// from the last and previous iteration. Instead of doing a straight
// replacement of that predicate with the vctp, clone the vctp and place it
// in the block. This means that the VPR doesn't have to be live into the
// exit block which should make it easier to convert this loop into a proper
// tail predicated loop.
//
// Returns true if a VCTP clone was placed in the exit block. Also performs
// a small DCE pass over \p MaybeDead and the loop's PHIs.
static bool Cleanup(DenseMap<Instruction*, Instruction*> &NewPredicates,
                    SetVector<Instruction*> &MaybeDead, Loop *L) {
  BasicBlock *Exit = L->getUniqueExitBlock();
  if (!Exit) {
    LLVM_DEBUG(dbgs() << "ARM TP: can't find loop exit block\n");
    return false;
  }

  bool ClonedVCTPInExitBlock = false;

  // For each old-predicate -> vctp pair, find a duplicate of the old
  // predicate in the exit block and replace it with a clone of the vctp.
  for (auto &Pair : NewPredicates) {
    Instruction *OldPred = Pair.first;
    Instruction *NewPred = Pair.second;

    for (auto &I : *Exit) {
      if (I.isSameOperationAs(OldPred)) {
        Instruction *PredClone = NewPred->clone();
        PredClone->insertBefore(&I);
        I.replaceAllUsesWith(PredClone);
        MaybeDead.insert(&I);
        ClonedVCTPInExitBlock = true;
        LLVM_DEBUG(dbgs() << "ARM TP: replacing: "; I.dump();
                   dbgs() << "ARM TP: with:      "; PredClone->dump());
        break;
      }
    }
  }

  // Drop references and add operands to check for dead.
  SmallPtrSet<Instruction*, 4> Dead;
  while (!MaybeDead.empty()) {
    auto *I = MaybeDead.front();
    MaybeDead.remove(I);
    if (I->hasNUsesOrMore(1))
      continue;

    // I is dead; its operands may become dead once I is erased.
    for (auto &U : I->operands())
      if (auto *OpI = dyn_cast<Instruction>(U))
        MaybeDead.insert(OpI);

    Dead.insert(I);
  }

  for (auto *I : Dead) {
    LLVM_DEBUG(dbgs() << "ARM TP: removing dead insn: "; I->dump());
    I->eraseFromParent();
  }

  for (auto I : L->blocks())
    DeleteDeadPHIs(I);

  return ClonedVCTPInExitBlock;
}
430 
// The active lane intrinsic has this form:
//
//    @llvm.get.active.lane.mask(IV, BTC)
//
// Here we perform checks that this intrinsic behaves as expected,
// which means:
//
// 1) The element count, which is calculated with BTC + 1, cannot overflow.
// 2) The element count needs to be sufficiently large that the decrement of
//    element counter doesn't overflow, which means that we need to prove:
//        ceil(ElementCount / VectorWidth) >= TripCount
//    by rounding up ElementCount up:
//        ((ElementCount + (VectorWidth - 1)) / VectorWidth
//    and evaluate if expression isKnownNonNegative:
//        (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
// 3) The IV must be an induction phi with an increment equal to the
//    vector width.
bool MVETailPredication::IsSafeActiveMask(Value *TripCount,
    FixedVectorType *VecTy) {
  // 1) Test whether entry to the loop is protected by a conditional
  // BTC + 1 < 0. In other words, if the scalar trip count overflows,
  // becomes negative, we shouldn't enter the loop and creating
  // tripcount expression BTC + 1 is not safe. So, check that BTC
  // isn't max. This is evaluated in unsigned, because the semantics
  // of @get.active.lane.mask is a ULE comparison.
  int VectorWidth = VecTy->getNumElements();
  auto *BackedgeTakenCount = ActiveLaneMask->getOperand(1);
  auto *BTC = SE->getSCEV(BackedgeTakenCount);

  if (!llvm::cannotBeMaxInLoop(BTC, L, *SE, false /*Signed*/) &&
      !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible, BTC can be max: ";
               BTC->dump());
    return false;
  }

  // 2) Prove that the sub expression is non-negative, i.e. it doesn't overflow:
  //
  //      (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount
  //
  // 2.1) First prove overflow can't happen in:
  //
  //      ElementCount + (VectorWidth - 1)
  //
  // Because of a lack of context, it is difficult to get a useful bounds on
  // this expression. But since ElementCount uses the same variables as the
  // TripCount (TC), for which we can find meaningful value ranges, we use that
  // instead and assert that:
  //
  //     upperbound(TC) <= UINT_MAX - VectorWidth
  //
  auto *TC = SE->getSCEV(TripCount);
  unsigned SizeInBits = TripCount->getType()->getScalarSizeInBits();
  // Diff = UINT_MAX - VectorWidth, evaluated in the trip count's bit width.
  auto Diff =  APInt(SizeInBits, ~0) - APInt(SizeInBits, VectorWidth);
  uint64_t MaxMinusVW = Diff.getZExtValue();
  uint64_t UpperboundTC = SE->getSignedRange(TC).getUpper().getZExtValue();

  if (UpperboundTC > MaxMinusVW && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in tripcount rounding:\n";
               dbgs() << "upperbound(TC) <= UINT_MAX - VectorWidth\n";
               dbgs() << UpperboundTC << " <= " << MaxMinusVW << "== false\n";);
    return false;
  }

  // 2.2) Make sure overflow doesn't happen in final expression:
  //  (((ElementCount + (VectorWidth - 1)) / VectorWidth) - TripCount,
  // To do this, compare the full ranges of these subexpressions:
  //
  //     Range(Ceil) <= Range(TC)
  //
  // where Ceil = ElementCount + (VW-1) / VW. If Ceil and TC are runtime
  // values (and not constants), we have to compensate for the lowerbound value
  // range to be off by 1. The reason is that BTC lives in the preheader in
  // this form:
  //
  //     %trip.count.minus = add nsw nuw i32 %N, -1
  //
  // For the loop to be executed, %N has to be >= 1 and as a result the value
  // range of %trip.count.minus has a lower bound of 0. Value %TC has this form:
  //
  //     %5 = add nuw nsw i32 %4, 1
  //     call void @llvm.set.loop.iterations.i32(i32 %5)
  //
  // where %5 is some expression using %N, which needs to have a lower bound of
  // 1. Thus, if the ranges of Ceil and TC are not a single constant but a set,
  // we first add 0 to TC such that we can do the <= comparison on both sets.
  //
  auto *One = SE->getOne(TripCount->getType());
  // ElementCount = BTC + 1
  auto *ElementCount = SE->getAddExpr(BTC, One);
  // Tmp = ElementCount + (VW-1)
  auto *ECPlusVWMinus1 = SE->getAddExpr(ElementCount,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth - 1)));
  // Ceil = ElementCount + (VW-1) / VW
  auto *Ceil = SE->getUDivExpr(ECPlusVWMinus1,
      SE->getSCEV(ConstantInt::get(TripCount->getType(), VectorWidth)));

  ConstantRange RangeCeil = SE->getSignedRange(Ceil) ;
  ConstantRange RangeTC = SE->getSignedRange(TC) ;
  if (!RangeTC.isSingleElement()) {
    // Extend TC's range down to 0 (see the off-by-one note above).
    auto ZeroRange =
        ConstantRange(APInt(TripCount->getType()->getScalarSizeInBits(), 0));
    RangeTC = RangeTC.unionWith(ZeroRange);
  }
  if (!RangeTC.contains(RangeCeil) && !ForceTailPredication) {
    LLVM_DEBUG(dbgs() << "ARM TP: Overflow possible in sub\n");
    return false;
  }

  // 3) Find out if IV is an induction phi. Note that We can't use Loop
  // helpers here to get the induction variable, because the hardware loop is
  // no longer in loopsimplify form, and also the hwloop intrinsic use a
  // different counter.  Using SCEV, we check that the induction is of the
  // form i = i + 4, where the increment must be equal to the VectorWidth.
  auto *IV = ActiveLaneMask->getOperand(0);
  auto *IVExpr = SE->getSCEV(IV);
  auto *AddExpr = dyn_cast<SCEVAddRecExpr>(IVExpr);
  if (!AddExpr) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction not an add expr: "; IVExpr->dump());
    return false;
  }
  // Check that this AddRec is associated with this loop.
  if (AddExpr->getLoop() != L) {
    LLVM_DEBUG(dbgs() << "ARM TP: phi not part of this loop\n");
    return false;
  }
  auto *Step = dyn_cast<SCEVConstant>(AddExpr->getOperand(1));
  if (!Step) {
    LLVM_DEBUG(dbgs() << "ARM TP: induction step is not a constant: ";
               AddExpr->getOperand(1)->dump());
    return false;
  }
  auto StepValue = Step->getValue()->getSExtValue();
  if (VectorWidth == StepValue)
    return true;

  LLVM_DEBUG(dbgs() << "ARM TP: step value " << StepValue << " doesn't match "
             "vector width : " << VectorWidth << "\n");

  return false;
}
572 
573 // Materialize NumElements in the preheader block.
574 static Value *getNumElements(BasicBlock *Preheader, Value *BTC) {
575   // First, check the preheader if it not already exist:
576   //
577   // preheader:
578   //    %BTC = add i32 %N, -1
579   //    ..
580   // vector.body:
581   //
582   // if %BTC already exists. We don't need to emit %NumElems = %BTC + 1,
583   // but instead can just return %N.
584   for (auto &I : *Preheader) {
585     if (I.getOpcode() != Instruction::Add || &I != BTC)
586       continue;
587     ConstantInt *MinusOne = nullptr;
588     if (!(MinusOne = dyn_cast<ConstantInt>(I.getOperand(1))))
589       continue;
590     if (MinusOne->getSExtValue() == -1) {
591       LLVM_DEBUG(dbgs() << "ARM TP: Found num elems: " << I << "\n");
592       return I.getOperand(0);
593     }
594   }
595 
596   // But we do need to materialise BTC if it is not already there,
597   // e.g. if it is a constant.
598   IRBuilder<> Builder(Preheader->getTerminator());
599   Value *NumElements = Builder.CreateAdd(BTC,
600         ConstantInt::get(BTC->getType(), 1), "num.elements");
601   LLVM_DEBUG(dbgs() << "ARM TP: Created num elems: " << *NumElements << "\n");
602   return NumElements;
603 }
604 
// Replace \p ActiveLaneMask with a call to the MVE VCTP intrinsic of the
// width matching \p VecTy, driven by a new "elements remaining" phi that
// starts at BTC + 1 and decreases by the vector width each iteration.
// Records the old->new predicate mapping in \p NewPredicates for Cleanup.
void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
    Value *TripCount, FixedVectorType *VecTy,
    DenseMap<Instruction*, Instruction*> &NewPredicates) {
  IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
  Module *M = L->getHeader()->getModule();
  Type *Ty = IntegerType::get(M->getContext(), 32);

  // The backedge-taken count in @llvm.get.active.lane.mask, its 2nd operand,
  // is one less than the trip count. So we need to find or create
  // %num.elements = %BTC + 1 in the preheader.
  Value *BTC = ActiveLaneMask->getOperand(1);
  Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
  Value *NumElements = getNumElements(L->getLoopPreheader(), BTC);

  // Insert a phi to count the number of elements processed by the loop.
  Builder.SetInsertPoint(L->getHeader()->getFirstNonPHI()  );
  PHINode *Processed = Builder.CreatePHI(Ty, 2);
  Processed->addIncoming(NumElements, L->getLoopPreheader());

  // Replace @llvm.get.active.mask() with the ARM specific VCTP intrinic, and thus
  // represent the effect of tail predication.
  Builder.SetInsertPoint(ActiveLaneMask);
  ConstantInt *Factor =
    ConstantInt::get(cast<IntegerType>(Ty), VecTy->getNumElements());

  // Select the VCTP variant for the element width implied by the lane count
  // (MVE vectors are 128-bit: 4 x i32, 8 x i16, 16 x i8).
  Intrinsic::ID VCTPID;
  switch (VecTy->getNumElements()) {
  default:
    llvm_unreachable("unexpected number of lanes");
  case 4:  VCTPID = Intrinsic::arm_mve_vctp32; break;
  case 8:  VCTPID = Intrinsic::arm_mve_vctp16; break;
  case 16: VCTPID = Intrinsic::arm_mve_vctp8; break;

    // FIXME: vctp64 currently not supported because the predicate
    // vector wants to be <2 x i1>, but v2i1 is not a legal MVE
    // type, so problems happen at isel time.
    // Intrinsic::arm_mve_vctp64 exists for ACLE intrinsics
    // purposes, but takes a v4i1 instead of a v2i1.
  }
  Function *VCTP = Intrinsic::getDeclaration(M, VCTPID);
  Value *VCTPCall = Builder.CreateCall(VCTP, Processed);
  ActiveLaneMask->replaceAllUsesWith(VCTPCall);
  NewPredicates[ActiveLaneMask] = cast<Instruction>(VCTPCall);

  // Add the incoming value to the new phi.
  // TODO: This add likely already exists in the loop.
  Value *Remaining = Builder.CreateSub(Processed, Factor);
  Processed->addIncoming(Remaining, L->getLoopLatch());
  LLVM_DEBUG(dbgs() << "ARM TP: Insert processed elements phi: "
             << *Processed << "\n"
             << "ARM TP: Inserted VCTP: " << *VCTPCall << "\n");
}
657 
// Drives the conversion: for every masked load/store predicated by
// @llvm.get.active.lane.mask, check safety and insert the VCTP replacement,
// then clean up duplicated predicates. Returns true if the loop was
// converted.
bool MVETailPredication::TryConvert(Value *TripCount) {
  if (!IsPredicatedVectorLoop()) {
    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop.\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "ARM TP: Found predicated vector loop.\n");

  SetVector<Instruction*> Predicates;
  DenseMap<Instruction*, Instruction*> NewPredicates;

  // Walk through the masked intrinsics and try to find whether the predicate
  // operand is generated by intrinsic @llvm.get.active.lane.mask().
  for (auto *I : MaskedInsts) {
    // The mask is argument 2 for masked_load, argument 3 for masked_store.
    unsigned PredOp = I->getIntrinsicID() == Intrinsic::masked_load ? 2 : 3;
    auto *Predicate = dyn_cast<Instruction>(I->getArgOperand(PredOp));
    if (!Predicate || Predicates.count(Predicate))
      continue;

    ActiveLaneMask = dyn_cast<IntrinsicInst>(Predicate);
    if (!ActiveLaneMask ||
        ActiveLaneMask->getIntrinsicID() != Intrinsic::get_active_lane_mask)
      continue;

    Predicates.insert(Predicate);
    LLVM_DEBUG(dbgs() << "ARM TP: Found active lane mask: "
                      << *ActiveLaneMask << "\n");

    VecTy = getVectorType(I);
    if (!IsSafeActiveMask(TripCount, VecTy)) {
      LLVM_DEBUG(dbgs() << "ARM TP: Not safe to insert VCTP.\n");
      return false;
    }
    LLVM_DEBUG(dbgs() << "ARM TP: Safe to insert VCTP.\n");
    InsertVCTPIntrinsic(ActiveLaneMask, TripCount, VecTy, NewPredicates);
  }

  // Now clean up.
  ClonedVCTPInExitBlock = Cleanup(NewPredicates, Predicates, L);
  return true;
}
699 
700 Pass *llvm::createMVETailPredicationPass() {
701   return new MVETailPredication();
702 }
703 
// Pass identification; the address of ID is what uniquely identifies the pass.
char MVETailPredication::ID = 0;

// Register the pass with the legacy pass manager.
INITIALIZE_PASS_BEGIN(MVETailPredication, DEBUG_TYPE, DESC, false, false)
INITIALIZE_PASS_END(MVETailPredication, DEBUG_TYPE, DESC, false, false)
708