1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 static cl::opt<bool>
54     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55                   cl::desc("Enable the generation of WLS loops"));
56 
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58 
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60 
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62 
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
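/// Illustrative sketch (not from the original comment): a call roughly like
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32.p0i8(i8* %p, i32 4)
/// becomes a plain aligned IR load through a bitcast pointer:
///   %v = load <4 x i32>, <4 x i32>* %cast, align 4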
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67                                InstCombiner::BuilderTy &Builder) {
68   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69 
70   if (!IntrAlign)
71     return nullptr;
72 
73   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74                            ? MemAlign
75                            : IntrAlign->getLimitedValue();
76 
77   if (!isPowerOf2_32(Alignment))
78     return nullptr;
79 
80   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81                                           PointerType::get(II.getType(), 0));
82   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84 
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86                                      const Function *Callee) const {
87   const TargetMachine &TM = getTLI()->getTargetMachine();
88   const FeatureBitset &CallerBits =
89       TM.getSubtargetImpl(*Caller)->getFeatureBits();
90   const FeatureBitset &CalleeBits =
91       TM.getSubtargetImpl(*Callee)->getFeatureBits();
92 
93   // To inline a callee, all features not in the allowed list must match exactly.
94   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95                     (CalleeBits & ~InlineFeaturesAllowed);
96   // For features in the allowed list, the callee's features must be a subset of
97 // the caller's.
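  // (Illustrative example, assuming NEON is on the allowed list: a callee
  // built without NEON can be inlined into a caller built with NEON, since its
  // allowed features are a subset, but not the other way around.)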
98   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99                      (CalleeBits & InlineFeaturesAllowed);
100   return MatchExact && MatchSubset;
101 }
102 
103 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
104   if (L->getHeader()->getParent()->hasOptSize())
105     return false;
106   if (ST->hasMVEIntegerOps())
107     return false;
108   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
109 }
110 
111 bool ARMTTIImpl::shouldFavorPostInc() const {
112   if (ST->hasMVEIntegerOps())
113     return true;
114   return false;
115 }
116 
117 Optional<Instruction *>
118 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
119   using namespace PatternMatch;
120   Intrinsic::ID IID = II.getIntrinsicID();
121   switch (IID) {
122   default:
123     break;
124   case Intrinsic::arm_neon_vld1: {
125     Align MemAlign =
126         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
127                           &IC.getAssumptionCache(), &IC.getDominatorTree());
128     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
129       return IC.replaceInstUsesWith(II, V);
130     }
131     break;
132   }
133 
134   case Intrinsic::arm_neon_vld2:
135   case Intrinsic::arm_neon_vld3:
136   case Intrinsic::arm_neon_vld4:
137   case Intrinsic::arm_neon_vld2lane:
138   case Intrinsic::arm_neon_vld3lane:
139   case Intrinsic::arm_neon_vld4lane:
140   case Intrinsic::arm_neon_vst1:
141   case Intrinsic::arm_neon_vst2:
142   case Intrinsic::arm_neon_vst3:
143   case Intrinsic::arm_neon_vst4:
144   case Intrinsic::arm_neon_vst2lane:
145   case Intrinsic::arm_neon_vst3lane:
146   case Intrinsic::arm_neon_vst4lane: {
147     Align MemAlign =
148         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
149                           &IC.getAssumptionCache(), &IC.getDominatorTree());
150     unsigned AlignArg = II.getNumArgOperands() - 1;
151     Value *AlignArgOp = II.getArgOperand(AlignArg);
152     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
153     if (Align && *Align < MemAlign) {
154       return IC.replaceOperand(
155           II, AlignArg,
156           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
157                            false));
158     }
159     break;
160   }
161 
162   case Intrinsic::arm_mve_pred_i2v: {
163     Value *Arg = II.getArgOperand(0);
164     Value *ArgArg;
165     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
166                        PatternMatch::m_Value(ArgArg))) &&
167         II.getType() == ArgArg->getType()) {
168       return IC.replaceInstUsesWith(II, ArgArg);
169     }
170     Constant *XorMask;
171     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
172                              PatternMatch::m_Value(ArgArg)),
173                          PatternMatch::m_Constant(XorMask))) &&
174         II.getType() == ArgArg->getType()) {
175       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
176         if (CI->getValue().trunc(16).isAllOnesValue()) {
177           auto TrueVector = IC.Builder.CreateVectorSplat(
178               cast<FixedVectorType>(II.getType())->getNumElements(),
179               IC.Builder.getTrue());
180           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
181         }
182       }
183     }
184     KnownBits ScalarKnown(32);
185     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
186                                 ScalarKnown, 0)) {
187       return &II;
188     }
189     break;
190   }
191   case Intrinsic::arm_mve_pred_v2i: {
192     Value *Arg = II.getArgOperand(0);
193     Value *ArgArg;
194     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
195                        PatternMatch::m_Value(ArgArg)))) {
196       return IC.replaceInstUsesWith(II, ArgArg);
197     }
198     if (!II.getMetadata(LLVMContext::MD_range)) {
199       Type *IntTy32 = Type::getInt32Ty(II.getContext());
200       Metadata *M[] = {
201           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
202           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
203       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
204       return &II;
205     }
206     break;
207   }
208   case Intrinsic::arm_mve_vadc:
209   case Intrinsic::arm_mve_vadc_predicated: {
210     unsigned CarryOp =
211         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
212     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
213            "Bad type for intrinsic!");
214 
215     KnownBits CarryKnown(32);
216     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
217                                 CarryKnown)) {
218       return &II;
219     }
220     break;
221   }
222   case Intrinsic::arm_mve_vmldava: {
223     Instruction *I = cast<Instruction>(&II);
224     if (I->hasOneUse()) {
225       auto *User = cast<Instruction>(*I->user_begin());
226       Value *OpZ;
227       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
228           match(I->getOperand(3), m_Zero())) {
229         Value *OpX = I->getOperand(4);
230         Value *OpY = I->getOperand(5);
231         Type *OpTy = OpX->getType();
232 
233         IC.Builder.SetInsertPoint(User);
234         Value *V =
235             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
236                                        {I->getOperand(0), I->getOperand(1),
237                                         I->getOperand(2), OpZ, OpX, OpY});
238 
239         IC.replaceInstUsesWith(*User, V);
240         return IC.eraseInstFromFunction(*User);
241       }
242     }
243     return None;
244   }
245   }
246   return None;
247 }
248 
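// Rough examples of the materialisation costs below (illustrative, not from
// the original source): in ARM mode 0xFF000000 is a rotated 8-bit immediate
// and costs 1 (a single MOV), while an arbitrary constant like 0x12345678
// costs 2 on v6t2+ (MOVW + MOVT) and 3 otherwise (e.g. a literal-pool load).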
249 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
250                               TTI::TargetCostKind CostKind) {
251   assert(Ty->isIntegerTy());
252 
253   unsigned Bits = Ty->getPrimitiveSizeInBits();
254   if (Bits == 0 || Imm.getActiveBits() >= 64)
255     return 4;
256 
257   int64_t SImmVal = Imm.getSExtValue();
258   uint64_t ZImmVal = Imm.getZExtValue();
259   if (!ST->isThumb()) {
260     if ((SImmVal >= 0 && SImmVal < 65536) ||
261         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
262         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
263       return 1;
264     return ST->hasV6T2Ops() ? 2 : 3;
265   }
266   if (ST->isThumb2()) {
267     if ((SImmVal >= 0 && SImmVal < 65536) ||
268         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
269         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
270       return 1;
271     return ST->hasV6T2Ops() ? 2 : 3;
272   }
273   // Thumb1: any i8 imm costs 1.
274   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
275     return 1;
276   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
277     return 2;
278   // Load from constantpool.
279   return 3;
280 }
281 
282 // Constants smaller than 256 fit in the immediate field of Thumb1
283 // instructions, so we return a cost of zero for them and 1 otherwise.
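// (Illustrative: "movs r0, #200" encodes its constant directly in a 16-bit
// Thumb1 instruction, whereas larger constants need extra code.)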
284 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
285                                       const APInt &Imm, Type *Ty) {
286   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
287     return 0;
288 
289   return 1;
290 }
291 
292 // Checks whether Inst is part of a min(max()) or max(min()) pattern
293 // that will match to an SSAT instruction
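// Illustrative example (not from the original comment): clamping to the
// signed 8-bit range,
//   %lo = smin(%x, 127) ; %r = smax(%lo, -128)
// can be selected as a single "ssat r0, #8, r1". Here Imm is -128, -Imm is a
// power of two, and the inner min constant is (-Imm)-1 = 127, which is exactly
// what the checks below look for.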
294 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
295   Value *LHS, *RHS;
296   ConstantInt *C;
297   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
298 
299   if (InstSPF == SPF_SMAX &&
300       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
301       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
302 
303     auto isSSatMin = [&](Value *MinInst) {
304       if (isa<SelectInst>(MinInst)) {
305         Value *MinLHS, *MinRHS;
306         ConstantInt *MinC;
307         SelectPatternFlavor MinSPF =
308             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
309         if (MinSPF == SPF_SMIN &&
310             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
311             MinC->getValue() == ((-Imm) - 1))
312           return true;
313       }
314       return false;
315     };
316 
317     if (isSSatMin(Inst->getOperand(1)) ||
318         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
319                                isSSatMin(*(++Inst->user_begin())))))
320       return true;
321   }
322   return false;
323 }
324 
325 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
326                                   const APInt &Imm, Type *Ty,
327                                   TTI::TargetCostKind CostKind,
328                                   Instruction *Inst) {
329   // Division by a constant can be turned into multiplication, but only if we
330   // know it's constant. So it's not so much that the immediate is cheap (it's
331   // not), but that the alternative is worse.
332   // FIXME: this is probably unneeded with GlobalISel.
333   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
334        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
335       Idx == 1)
336     return 0;
337 
338   if (Opcode == Instruction::And) {
339     // UXTB/UXTH
340     if (Imm == 255 || Imm == 65535)
341       return 0;
342     // Conversion to BIC is free, and means we can use ~Imm instead.
343     return std::min(getIntImmCost(Imm, Ty, CostKind),
344                     getIntImmCost(~Imm, Ty, CostKind));
345   }
346 
347   if (Opcode == Instruction::Add)
348     // Conversion to SUB is free, and means we can use -Imm instead.
349     return std::min(getIntImmCost(Imm, Ty, CostKind),
350                     getIntImmCost(-Imm, Ty, CostKind));
351 
352   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
353       Ty->getIntegerBitWidth() == 32) {
354     int64_t NegImm = -Imm.getSExtValue();
355     if (ST->isThumb2() && NegImm < 1<<12)
356       // icmp X, #-C -> cmn X, #C
357       return 0;
358     if (ST->isThumb() && NegImm < 1<<8)
359       // icmp X, #-C -> adds X, #C
360       return 0;
361   }
362 
363   // xor a, -1 can always be folded to MVN
364   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
365     return 0;
366 
367   // Ensure that negative constants in min(max()) or max(min()) patterns that
368   // match SSAT instructions don't get hoisted.
369   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
370       Ty->getIntegerBitWidth() <= 32) {
371     if (isSSATMinMaxPattern(Inst, Imm) ||
372         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
373          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
374       return 0;
375   }
376 
377   return getIntImmCost(Imm, Ty, CostKind);
378 }
379 
380 int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
381   if (CostKind == TTI::TCK_RecipThroughput &&
382       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
383     // FIXME: The vectorizer is highly sensitive to the cost of these
384     // instructions, which suggests that it may be using the costs incorrectly.
385     // But, for now, just make them free to avoid performance regressions for
386     // vector targets.
387     return 0;
388   }
389   return BaseT::getCFInstrCost(Opcode, CostKind);
390 }
391 
392 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
393                                  TTI::CastContextHint CCH,
394                                  TTI::TargetCostKind CostKind,
395                                  const Instruction *I) {
396   int ISD = TLI->InstructionOpcodeToISD(Opcode);
397   assert(ISD && "Invalid opcode");
398 
399   // TODO: Allow non-throughput costs that aren't binary.
400   auto AdjustCost = [&CostKind](int Cost) {
401     if (CostKind != TTI::TCK_RecipThroughput)
402       return Cost == 0 ? 0 : 1;
403     return Cost;
404   };
405   auto IsLegalFPType = [this](EVT VT) {
406     EVT EltVT = VT.getScalarType();
407     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
408             (EltVT == MVT::f64 && ST->hasFP64()) ||
409             (EltVT == MVT::f16 && ST->hasFullFP16());
410   };
411 
412   EVT SrcTy = TLI->getValueType(DL, Src);
413   EVT DstTy = TLI->getValueType(DL, Dst);
414 
415   if (!SrcTy.isSimple() || !DstTy.isSimple())
416     return AdjustCost(
417         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
418 
419   // Extending masked loads and truncating masked stores are expensive because
420   // we currently don't split them. This means that we'll likely end up
421   // loading/storing each element individually (hence the high cost).
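  // (Illustrative: a zero-extend from a masked v8i16 load to v8i32 produces a
  // 256-bit result, so it is costed below as roughly 2 * 8 *
  // getMVEVectorCostFactor.)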
422   if ((ST->hasMVEIntegerOps() &&
423        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
424         Opcode == Instruction::SExt)) ||
425       (ST->hasMVEFloatOps() &&
426        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
427        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
428     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
429       return 2 * DstTy.getVectorNumElements() *
430              ST->getMVEVectorCostFactor(CostKind);
431 
432   // The extend of other kinds of load is free
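  // (Illustrative: a scalar sext(load i16) can be selected as LDRSH and a
  // zext(load i8) as LDRB, so no separate extend instruction is needed.)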
433   if (CCH == TTI::CastContextHint::Normal ||
434       CCH == TTI::CastContextHint::Masked) {
435     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
436         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
437         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
438         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
439         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
440         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
441         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
442         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
443         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
444         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
445         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
446         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
447         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
448     };
449     if (const auto *Entry = ConvertCostTableLookup(
450             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
451       return AdjustCost(Entry->Cost);
452 
453     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
454         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
455         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
456         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
457         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
458         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
459         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
460         // The following extend from a legal type to an illegal type, so we
461         // need to split the load. This introduces an extra load operation, but
462         // the extend is still "free".
463         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
464         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
465         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
466         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
467         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
468         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
469     };
470     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
471       if (const auto *Entry =
472               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
473                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
474         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
475     }
476 
477     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
478         // FPExtends are similar but also require the VCVT instructions.
479         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
480         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
481     };
482     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
483       if (const auto *Entry =
484               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
485                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
486         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
487     }
488 
489     // The truncate of a store is free. This is the mirror of extends above.
490     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
491         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
492         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
493         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
494         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
495         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
496         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
497         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
498     };
499     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
500       if (const auto *Entry =
501               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
502                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
503         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
504     }
505 
506     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
507         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
508         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
509     };
510     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
511       if (const auto *Entry =
512               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
513                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
514         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
515     }
516   }
517 
518   // NEON vector operations that can extend their inputs.
519   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
520       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
521     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
522       // vaddl
523       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
524       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
525       // vsubl
526       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
527       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
528       // vmull
529       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
530       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
531       // vshll
532       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
533       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
534     };
535 
536     auto *User = cast<Instruction>(*I->user_begin());
537     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
538     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
539                                              DstTy.getSimpleVT(),
540                                              SrcTy.getSimpleVT())) {
541       return AdjustCost(Entry->Cost);
542     }
543   }
544 
545   // Single to/from double precision conversions.
546   if (Src->isVectorTy() && ST->hasNEON() &&
547       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
548         DstTy.getScalarType() == MVT::f32) ||
549        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
550         DstTy.getScalarType() == MVT::f64))) {
551     static const CostTblEntry NEONFltDblTbl[] = {
552         // Vector fptrunc/fpext conversions.
553         {ISD::FP_ROUND, MVT::v2f64, 2},
554         {ISD::FP_EXTEND, MVT::v2f32, 2},
555         {ISD::FP_EXTEND, MVT::v4f32, 4}};
556 
557     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
558     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
559       return AdjustCost(LT.first * Entry->Cost);
560   }
561 
562   // Some arithmetic, load and store operations have specific instructions
563   // to cast up/down their types automatically at no extra cost.
564   // TODO: Get these tables to know at least what the related operations are.
565   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
566     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
567     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
568     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
569     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
570     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
571     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
572 
573     // The number of vmovl instructions for the extension.
574     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
575     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
576     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
577     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
578     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
579     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
580     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
581     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
582     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
583     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
584     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
585     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
586     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
587     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
588     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
589     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
590     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
591     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
592 
593     // Operations that we legalize using splitting.
594     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
595     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
596 
597     // Vector float <-> i32 conversions.
598     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
599     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
600 
601     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
602     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
603     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
604     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
605     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
606     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
607     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
608     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
609     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
610     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
611     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
612     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
613     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
614     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
615     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
616     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
617     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
618     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
619     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
620     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
621 
622     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
623     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
624     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
625     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
626     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
627     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
628 
629     // Vector double <-> i32 conversions.
630     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
631     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
632 
633     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
634     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
635     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
636     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
637     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
638     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
639 
640     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
641     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
642     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
643     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
644     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
645     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
646   };
647 
648   if (SrcTy.isVector() && ST->hasNEON()) {
649     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
650                                                    DstTy.getSimpleVT(),
651                                                    SrcTy.getSimpleVT()))
652       return AdjustCost(Entry->Cost);
653   }
654 
655   // Scalar float to integer conversions.
656   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
657     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
658     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
659     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
660     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
661     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
662     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
663     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
664     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
665     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
666     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
667     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
668     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
669     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
670     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
671     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
672     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
673     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
674     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
675     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
676     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
677   };
678   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
679     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
680                                                    DstTy.getSimpleVT(),
681                                                    SrcTy.getSimpleVT()))
682       return AdjustCost(Entry->Cost);
683   }
684 
685   // Scalar integer to float conversions.
686   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
687     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
688     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
689     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
690     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
691     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
692     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
693     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
694     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
695     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
696     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
697     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
698     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
699     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
700     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
701     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
702     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
703     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
704     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
705     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
706     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
707   };
708 
709   if (SrcTy.isInteger() && ST->hasNEON()) {
710     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
711                                                    ISD, DstTy.getSimpleVT(),
712                                                    SrcTy.getSimpleVT()))
713       return AdjustCost(Entry->Cost);
714   }
715 
716   // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
717   // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
718   // are linearised so take more.
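  // (Illustrative: e.g. a v8i8->v8i16 sign extend can be a single VMOVLB.S8,
  // while a v2i64 sign extend has no single instruction and is expanded lane
  // by lane, hence the much larger costs below.)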
719   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
720     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
721     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
722     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
723     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
724     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
725     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
726     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
727     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
728     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
729     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
730     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
731     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
732   };
733 
734   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
735     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
736                                                    ISD, DstTy.getSimpleVT(),
737                                                    SrcTy.getSimpleVT()))
738       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
739   }
740 
741   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
742     // As general rule, fp converts that were not matched above are scalarized
743     // and cost 1 vcvt for each lane, so long as the instruction is available.
744     // If not it will become a series of function calls.
745     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
746     int Lanes = 1;
747     if (SrcTy.isFixedLengthVector())
748       Lanes = SrcTy.getVectorNumElements();
749 
750     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
751       return Lanes;
752     else
753       return Lanes * CallCost;
754   }
755 
756   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
757       SrcTy.isFixedLengthVector()) {
758     // Treat a truncate with a larger-than-legal source (128 bits for MVE) as
759     // expensive, 2 instructions per lane.
760     if ((SrcTy.getScalarType() == MVT::i8 ||
761          SrcTy.getScalarType() == MVT::i16 ||
762          SrcTy.getScalarType() == MVT::i32) &&
763         SrcTy.getSizeInBits() > 128 &&
764         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
765       return SrcTy.getVectorNumElements() * 2;
766   }
767 
768   // Scalar integer conversion costs.
769   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
770     // i16 -> i64 requires two dependent operations.
771     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
772 
773     // Truncates on i64 are assumed to be free.
774     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
775     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
776     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
777     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
778   };
779 
780   if (SrcTy.isInteger()) {
781     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
782                                                    DstTy.getSimpleVT(),
783                                                    SrcTy.getSimpleVT()))
784       return AdjustCost(Entry->Cost);
785   }
786 
787   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
788                      ? ST->getMVEVectorCostFactor(CostKind)
789                      : 1;
790   return AdjustCost(
791       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
792 }
793 
794 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
795                                    unsigned Index) {
796   // Penalize inserting into a D-subregister. We end up with a three times
797   // lower estimated throughput on Swift.
798   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
799       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
800     return 3;
801 
802   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
803                         Opcode == Instruction::ExtractElement)) {
804     // Cross-class copies are expensive on many microarchitectures,
805     // so assume they are expensive by default.
806     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
807       return 3;
808 
809     // Even if it's not a cross-class copy, this likely leads to mixing
810     // of NEON and VFP code and should therefore be penalized.
811     if (ValTy->isVectorTy() &&
812         ValTy->getScalarSizeInBits() <= 32)
813       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
814   }
815 
816   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
817                                  Opcode == Instruction::ExtractElement)) {
818     // We say MVE moves cost at least the MVEVectorCostFactor, even though
819     // they are scalar instructions. This helps prevent mixing scalar and
820     // vector code, so that we don't vectorise where we would end up just
821     // scalarising the result anyway.
822     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
823                     ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)) *
824            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
825   }
826 
827   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
828 }
829 
830 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
831                                    CmpInst::Predicate VecPred,
832                                    TTI::TargetCostKind CostKind,
833                                    const Instruction *I) {
834   int ISD = TLI->InstructionOpcodeToISD(Opcode);
835 
836   // Thumb scalar code size cost for select.
837   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
838       ST->isThumb() && !ValTy->isVectorTy()) {
839     // Assume expensive structs.
840     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
841       return TTI::TCC_Expensive;
842 
843     // Select costs can vary because they:
844     // - may require one or more conditional mov (including an IT),
845     // - can't operate directly on immediates,
846     // - require live flags, which we can't copy around easily.
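    // (Illustrative: a legal i32 select is roughly CMP + IT + MOV in Thumb2,
    // while an i64 select needs a conditional move for each half.)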
847     int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
848 
849     // Possible IT instruction for Thumb2, or more for Thumb1.
850     ++Cost;
851 
852     // i1 values may need rematerialising by using mov immediates and/or
853     // flag setting instructions.
854     if (ValTy->isIntegerTy(1))
855       ++Cost;
856 
857     return Cost;
858   }
859 
860   // On NEON a vector select gets lowered to vbsl.
861   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
862     // Lowering of some vector selects is currently far from perfect.
863     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
864       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
865       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
866       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
867     };
868 
869     EVT SelCondTy = TLI->getValueType(DL, CondTy);
870     EVT SelValTy = TLI->getValueType(DL, ValTy);
871     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
872       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
873                                                      SelCondTy.getSimpleVT(),
874                                                      SelValTy.getSimpleVT()))
875         return Entry->Cost;
876     }
877 
878     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
879     return LT.first;
880   }
881 
882   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
883   // for "multiple beats" potentially needed by MVE instructions.
884   int BaseCost = 1;
885   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
886     BaseCost = ST->getMVEVectorCostFactor(CostKind);
887 
888   return BaseCost *
889          BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
890 }
891 
892 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
893                                           const SCEV *Ptr) {
894   // Address computations in vectorized code with non-consecutive addresses will
895   // likely result in more instructions compared to scalar code where the
896   // computation can more often be merged into the index mode. The resulting
897   // extra micro-ops can significantly decrease throughput.
898   unsigned NumVectorInstToHideOverhead = 10;
899   int MaxMergeDistance = 64;
900 
901   if (ST->hasNEON()) {
902     if (Ty->isVectorTy() && SE &&
903         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
904       return NumVectorInstToHideOverhead;
905 
906     // In many cases the address computation is not merged into the instruction
907     // addressing mode.
908     return 1;
909   }
910   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
911 }
912 
913 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
914   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
915     // If a VCTP is part of a chain, it's already profitable and shouldn't be
916     // optimized, else LSR may block tail-predication.
917     switch (II->getIntrinsicID()) {
918     case Intrinsic::arm_mve_vctp8:
919     case Intrinsic::arm_mve_vctp16:
920     case Intrinsic::arm_mve_vctp32:
921     case Intrinsic::arm_mve_vctp64:
922       return true;
923     default:
924       break;
925     }
926   }
927   return false;
928 }
929 
930 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
931   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
932     return false;
933 
934   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
935     // Don't support v2i1 yet.
936     if (VecTy->getNumElements() == 2)
937       return false;
938 
939     // We don't support extending fp types.
940     unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
941     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
942       return false;
943   }
944 
945   unsigned EltWidth = DataTy->getScalarSizeInBits();
946   return (EltWidth == 32 && Alignment >= 4) ||
947          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
948 }
949 
950 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
951   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
952     return false;
953 
954   // This method is called in 2 places:
955   //  - from the vectorizer with a scalar type, in which case we need to get
956   //  this as good as we can with the limited info we have (and rely on the cost
957   //  model for the rest).
958   //  - from the masked intrinsic lowering pass with the actual vector type.
959   // For MVE, we have a custom lowering pass that will already have custom
960   // legalised any gathers that it can into MVE intrinsics, and want to expand all
961   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
962   // are here, we know we want to expand.
963   if (isa<VectorType>(Ty))
964     return false;
965 
966   unsigned EltWidth = Ty->getScalarSizeInBits();
967   return ((EltWidth == 32 && Alignment >= 4) ||
968           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
969 }
970 
971 /// Given a memcpy/memset/memmove instruction, return the number of memory
972 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
973 /// call is used.
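/// For instance (illustrative): a 16-byte memcpy with 4-byte-aligned operands
/// may be lowered as four i32 loads plus four i32 stores, i.e. MemOps.size()
/// is 4 and Factor is 2, giving 8.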
974 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
975   MemOp MOp;
976   unsigned DstAddrSpace = ~0u;
977   unsigned SrcAddrSpace = ~0u;
978   const Function *F = I->getParent()->getParent();
979 
980   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
981     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
982     // If 'size' is not a constant, a library call will be generated.
983     if (!C)
984       return -1;
985 
986     const unsigned Size = C->getValue().getZExtValue();
987     const Align DstAlign = *MC->getDestAlign();
988     const Align SrcAlign = *MC->getSourceAlign();
989 
990     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
991                       /*IsVolatile*/ false);
992     DstAddrSpace = MC->getDestAddressSpace();
993     SrcAddrSpace = MC->getSourceAddressSpace();
994   }
995   else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
996     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
997     // If 'size' is not a constant, a library call will be generated.
998     if (!C)
999       return -1;
1000 
1001     const unsigned Size = C->getValue().getZExtValue();
1002     const Align DstAlign = *MS->getDestAlign();
1003 
1004     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1005                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1006     DstAddrSpace = MS->getDestAddressSpace();
1007   }
1008   else
1009     llvm_unreachable("Expected a memcpy/move or memset!");
1010 
1011   unsigned Limit, Factor = 2;
1012   switch(I->getIntrinsicID()) {
1013     case Intrinsic::memcpy:
1014       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1015       break;
1016     case Intrinsic::memmove:
1017       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1018       break;
1019     case Intrinsic::memset:
1020       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1021       Factor = 1;
1022       break;
1023     default:
1024       llvm_unreachable("Expected a memcpy/move or memset!");
1025   }
1026 
1027   // MemOps will be populated with a list of data types that need to be
1028   // loaded and stored. That's why we multiply the number of elements by
1029   // Factor (2 for memcpy/memmove, 1 for memset) to get the cost.
1030   std::vector<EVT> MemOps;
1031   if (getTLI()->findOptimalMemOpLowering(
1032           MemOps, Limit, MOp, DstAddrSpace,
1033           SrcAddrSpace, F->getAttributes()))
1034     return MemOps.size() * Factor;
1035 
1036   // If we can't find an optimal memop lowering, return the default cost
1037   return -1;
1038 }
1039 
1040 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1041   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1042 
1043   // To model the cost of a library call, we assume 1 for the call, and
1044   // 3 for the argument setup.
1045   if (NumOps == -1)
1046     return 4;
1047   return NumOps;
1048 }
1049 
1050 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
1051                                int Index, VectorType *SubTp) {
1052   if (ST->hasNEON()) {
1053     if (Kind == TTI::SK_Broadcast) {
1054       static const CostTblEntry NEONDupTbl[] = {
1055           // VDUP handles these cases.
1056           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1057           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1058           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1059           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1060           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1061           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1062 
1063           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1064           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1065           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1066           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1067 
1068       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1069 
1070       if (const auto *Entry =
1071               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1072         return LT.first * Entry->Cost;
1073     }
1074     if (Kind == TTI::SK_Reverse) {
1075       static const CostTblEntry NEONShuffleTbl[] = {
1076           // Reverse shuffle cost one instruction if we are shuffling within a
1077           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1078           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1079           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1080           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1081           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1082           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1083           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1084 
1085           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1086           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1087           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1088           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1089 
1090       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1091 
1092       if (const auto *Entry =
1093               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1094         return LT.first * Entry->Cost;
1095     }
1096     if (Kind == TTI::SK_Select) {
1097       static const CostTblEntry NEONSelShuffleTbl[] = {
1098           // Select shuffle cost table for ARM. Cost is the number
1099           // of instructions required to create the shuffled
1100           // vector.
1101 
1102           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1103           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1104           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1105           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1106 
1107           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1108           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1109           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1110 
1111           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1112 
1113           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1114 
1115       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1116       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1117                                               ISD::VECTOR_SHUFFLE, LT.second))
1118         return LT.first * Entry->Cost;
1119     }
1120   }
1121   if (ST->hasMVEIntegerOps()) {
1122     if (Kind == TTI::SK_Broadcast) {
1123       static const CostTblEntry MVEDupTbl[] = {
1124           // VDUP handles these cases.
1125           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1126           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1127           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1128           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1129           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1130 
1131       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1132 
1133       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1134                                               LT.second))
1135         return LT.first * Entry->Cost *
1136                ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1137     }
1138   }
1139   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1140                      ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1141                      : 1;
1142   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
1143 }
1144 
1145 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
1146                                        TTI::TargetCostKind CostKind,
1147                                        TTI::OperandValueKind Op1Info,
1148                                        TTI::OperandValueKind Op2Info,
1149                                        TTI::OperandValueProperties Opd1PropInfo,
1150                                        TTI::OperandValueProperties Opd2PropInfo,
1151                                        ArrayRef<const Value *> Args,
1152                                        const Instruction *CxtI) {
1153   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1154   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1155     // Make operations on i1 relatively expensive as this often involves
1156     // combining predicates. AND and XOR should be easier to handle with IT
1157     // blocks.
1158     switch (ISDOpcode) {
1159     default:
1160       break;
1161     case ISD::AND:
1162     case ISD::XOR:
1163       return 2;
1164     case ISD::OR:
1165       return 3;
1166     }
1167   }
1168 
1169   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1170 
1171   if (ST->hasNEON()) {
1172     const unsigned FunctionCallDivCost = 20;
1173     const unsigned ReciprocalDivCost = 10;
1174     static const CostTblEntry CostTbl[] = {
1175       // Division.
1176       // These costs are somewhat random. Choose a cost of 20 to indicate that
1177       // vectorizing division (added function call) is going to be very expensive.
1178       // Double registers types.
1179       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1180       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1181       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1182       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1183       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1184       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1185       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1186       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1187       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1188       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1189       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1190       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1191       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1192       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1193       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1194       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1195       // Quad register types.
1196       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1197       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1198       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1199       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1200       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1201       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1202       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1203       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1204       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1205       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1206       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1207       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1208       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1209       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1210       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1211       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1212       // Multiplication.
1213     };
1214 
1215     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1216       return LT.first * Entry->Cost;
1217 
1218     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1219                                              Op2Info,
1220                                              Opd1PropInfo, Opd2PropInfo);
1221 
1222     // This is somewhat of a hack. The problem that we are facing is that SROA
1223     // creates a sequence of shift, and, or instructions to construct values.
1224     // These sequences are recognized by ISel and have zero cost. Not so for
1225     // the vectorized code. Because we have support for v2i64 but not i64 those
1226     // sequences look particularly beneficial to vectorize.
1227     // To work around this we increase the cost of v2i64 operations to make them
1228     // seem less beneficial.
1229     if (LT.second == MVT::v2i64 &&
1230         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1231       Cost += 4;
1232 
1233     return Cost;
1234   }
1235 
1236   // If this operation is a shift on arm/thumb2, it might well be folded into
1237   // the following instruction, hence having a cost of 0.
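  // (Illustrative: "add r0, r1, r2, lsl #2" folds the shift into the ADD's
  // flexible second operand, so the shift itself costs nothing.)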
1238   auto LooksLikeAFreeShift = [&]() {
1239     if (ST->isThumb1Only() || Ty->isVectorTy())
1240       return false;
1241 
1242     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1243       return false;
1244     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1245       return false;
1246 
1247     // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1248     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1249     case Instruction::Add:
1250     case Instruction::Sub:
1251     case Instruction::And:
1252     case Instruction::Xor:
1253     case Instruction::Or:
1254     case Instruction::ICmp:
1255       return true;
1256     default:
1257       return false;
1258     }
1259   };
1260   if (LooksLikeAFreeShift())
1261     return 0;
1262 
1263   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1264   // for "multiple beats" potentially needed by MVE instructions.
1265   int BaseCost = 1;
1266   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1267     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1268 
1269   // The rest of this mostly follows what is done in
1270   // BaseT::getArithmeticInstrCost, without treating floats as more expensive
1271   // than scalars or increasing the costs for custom operations. The result is
1272   // also multiplied by the MVEVectorCostFactor where appropriate.
1273   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1274     return LT.first * BaseCost;
1275 
1276   // Else this is expand, assume that we need to scalarize this op.
1277   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1278     unsigned Num = VTy->getNumElements();
1279     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
1280                                            CostKind);
1281     // Return the cost of multiple scalar invocation plus the cost of
1282     // inserting and extracting the values.
1283     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
1284   }
1285 
1286   return BaseCost;
1287 }
1288 
1289 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1290                                 MaybeAlign Alignment, unsigned AddressSpace,
1291                                 TTI::TargetCostKind CostKind,
1292                                 const Instruction *I) {
1293   // TODO: Handle other cost kinds.
1294   if (CostKind != TTI::TCK_RecipThroughput)
1295     return 1;
1296 
1297   // Type legalization can't handle structs
1298   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1299     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1300                                   CostKind);
1301 
1302   if (ST->hasNEON() && Src->isVectorTy() &&
1303       (Alignment && *Alignment != Align(16)) &&
1304       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1305     // Unaligned loads/stores are extremely inefficient.
1306     // We need 4 uops for vst1/vld1 vs 1 uop for vldr/vstr.
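    // E.g. an unaligned <2 x double> access is therefore costed as
    // LT.first * 4 below, rather than as a single memory operation.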
1307     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1308     return LT.first * 4;
1309   }
1310 
1311   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1312   // Same for stores.
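  // As a sketch of the pattern matched below (operand names are illustrative):
  //   %l = load <4 x half>, <4 x half>* %p
  //   %e = fpext <4 x half> %l to <4 x float>
  // Such a pair is costed as a single MVE memory operation.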
1313   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1314       ((Opcode == Instruction::Load && I->hasOneUse() &&
1315         isa<FPExtInst>(*I->user_begin())) ||
1316        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1317     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1318     Type *DstTy =
1319         Opcode == Instruction::Load
1320             ? (*I->user_begin())->getType()
1321             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1322     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1323         DstTy->getScalarType()->isFloatTy())
1324       return ST->getMVEVectorCostFactor(CostKind);
1325   }
1326 
1327   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1328                      ? ST->getMVEVectorCostFactor(CostKind)
1329                      : 1;
1330   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1331                                            CostKind, I);
1332 }
1333 
1334 unsigned ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1335                                            Align Alignment,
1336                                            unsigned AddressSpace,
1337                                            TTI::TargetCostKind CostKind) {
1338   if (ST->hasMVEIntegerOps()) {
1339     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1340       return ST->getMVEVectorCostFactor(CostKind);
1341     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1342       return ST->getMVEVectorCostFactor(CostKind);
1343   }
1344   if (!isa<FixedVectorType>(Src))
1345     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1346                                         CostKind);
1347   // Scalar cost, which is currently very high due to the inefficiency of the
1348   // generated code.
1349   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1350 }
1351 
1352 int ARMTTIImpl::getInterleavedMemoryOpCost(
1353     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1354     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1355     bool UseMaskForCond, bool UseMaskForGaps) {
1356   assert(Factor >= 2 && "Invalid interleave factor");
1357   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1358 
1359   // vldN/vstN don't support vector types with i64/f64 elements.
1360   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1361 
1362   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1363       !UseMaskForCond && !UseMaskForGaps) {
1364     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1365     auto *SubVecTy =
1366         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1367 
1368     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1369     // Accesses having vector types that are a multiple of 128 bits can be
1370     // matched to more than one vldN/vstN instruction.
1371     int BaseCost =
1372         ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1373     if (NumElts % Factor == 0 &&
1374         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1375       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1376 
1377     // Some smaller than legal interleaved patterns are cheap as we can make
1378     // use of the vmovn or vrev patterns to interleave a standard load. This is
1379     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1380     // promoted differently). The cost of 2 here is then a load and vrev or
1381     // vmovn.
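    // For instance, a factor-2 deinterleave of a <8 x i8> access has 32-bit
    // sub-vectors, so it is costed here as roughly a load plus a vrev/vmovn
    // rather than falling back to the scalarized base cost.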
1382     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1383         VecTy->isIntOrIntVectorTy() &&
1384         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1385       return 2 * BaseCost;
1386   }
1387 
1388   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1389                                            Alignment, AddressSpace, CostKind,
1390                                            UseMaskForCond, UseMaskForGaps);
1391 }
1392 
1393 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1394                                             const Value *Ptr, bool VariableMask,
1395                                             Align Alignment,
1396                                             TTI::TargetCostKind CostKind,
1397                                             const Instruction *I) {
1398   using namespace PatternMatch;
1399   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1400     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1401                                          Alignment, CostKind, I);
1402 
1403   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1404   auto *VTy = cast<FixedVectorType>(DataTy);
1405 
1406   // TODO: Splitting, once we do that.
1407 
1408   unsigned NumElems = VTy->getNumElements();
1409   unsigned EltSize = VTy->getScalarSizeInBits();
1410   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1411 
1412   // For now, it is assumed that for the MVE gather instructions the loads are
1413   // all effectively serialised. This means the cost is the scalar cost
1414   // multiplied by the number of elements being loaded. This is possibly very
1415   // conservative, but even so we still end up vectorising loops because the
1416   // cost per iteration for many loops is lower than for scalar loops.
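  // For example, under this model a legal v4i32 gather is costed below as
  // 4 * LT.first * ST->getMVEVectorCostFactor(CostKind).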
1417   unsigned VectorCost =
1418       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1419   // The scalarization cost should be a lot higher. We use the number of vector
1420   // elements plus the scalarization overhead.
1421   unsigned ScalarCost = NumElems * LT.first +
1422                         BaseT::getScalarizationOverhead(VTy, true, false) +
1423                         BaseT::getScalarizationOverhead(VTy, false, true);
1424 
1425   if (EltSize < 8 || Alignment < EltSize / 8)
1426     return ScalarCost;
1427 
1428   unsigned ExtSize = EltSize;
1429   // Check whether there's a single user that asks for an extended type
1430   if (I != nullptr) {
1431     // Depending on the caller of this function, a gather instruction will
1432     // either have opcode Instruction::Load or be a call to the masked_gather
1433     // intrinsic.
1434     if ((I->getOpcode() == Instruction::Load ||
1435          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1436         I->hasOneUse()) {
1437       const User *Us = *I->users().begin();
1438       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1439         // only allow valid type combinations
1440         unsigned TypeSize =
1441             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1442         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1443              (TypeSize == 16 && EltSize == 8)) &&
1444             TypeSize * NumElems == 128) {
1445           ExtSize = TypeSize;
1446         }
1447       }
1448     }
1449     // Check whether the input data needs to be truncated
1450     TruncInst *T;
1451     if ((I->getOpcode() == Instruction::Store ||
1452          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1453         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1454       // Only allow valid type combinations
1455       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1456       if (((EltSize == 16 && TypeSize == 32) ||
1457            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1458           TypeSize * NumElems == 128)
1459         ExtSize = TypeSize;
1460     }
1461   }
1462 
1463   if (ExtSize * NumElems != 128 || NumElems < 4)
1464     return ScalarCost;
1465 
1466   // Any (aligned) i32 gather will not need to be scalarised.
1467   if (ExtSize == 32)
1468     return VectorCost;
1469   // For smaller types, we need to ensure that the gep's inputs are correctly
1470   // extended from a small enough value. Other sizes (including i64) are
1471   // scalarized for now.
1472   if (ExtSize != 8 && ExtSize != 16)
1473     return ScalarCost;
1474 
1475   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1476     Ptr = BC->getOperand(0);
1477   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1478     if (GEP->getNumOperands() != 2)
1479       return ScalarCost;
1480     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1481     // Scale needs to be correct (which is only relevant for i16s).
1482     if (Scale != 1 && Scale * 8 != ExtSize)
1483       return ScalarCost;
1484     // And we need to zext (not sext) the indexes from a small enough type.
1485     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1486       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1487         return VectorCost;
1488     }
1489     return ScalarCost;
1490   }
1491   return ScalarCost;
1492 }
1493 
1494 int ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1495                                            bool IsPairwiseForm,
1496                                            TTI::TargetCostKind CostKind) {
1497   EVT ValVT = TLI->getValueType(DL, ValTy);
1498   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1499   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1500     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1501                                              CostKind);
1502 
1503   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1504 
1505   static const CostTblEntry CostTblAdd[]{
1506       {ISD::ADD, MVT::v16i8, 1},
1507       {ISD::ADD, MVT::v8i16, 1},
1508       {ISD::ADD, MVT::v4i32, 1},
1509   };
1510   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1511     return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1512 
1513   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1514                                            CostKind);
1515 }
1516 
1517 InstructionCost
1518 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
1519                                         Type *ResTy, VectorType *ValTy,
1520                                         TTI::TargetCostKind CostKind) {
1521   EVT ValVT = TLI->getValueType(DL, ValTy);
1522   EVT ResVT = TLI->getValueType(DL, ResTy);
1523   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1524     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1525     if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
1526         (LT.second == MVT::v8i16 &&
1527          ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
1528         (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
1529       return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1530   }
1531 
1532   return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
1533                                             CostKind);
1534 }
1535 
1536 int ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1537                                       TTI::TargetCostKind CostKind) {
1538   switch (ICA.getID()) {
1539   case Intrinsic::get_active_lane_mask:
1540     // Currently we make a somewhat optimistic assumption that
1541     // active_lane_masks are always free. In reality it may be freely folded
1542     // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
1543     // of add/icmp code. We may need to improve this in the future, but being
1544     // able to detect if it is free or not involves looking at a lot of other
1545     // code. We currently assume that the vectorizer inserted these, and knew
1546     // what it was doing in adding one.
1547     if (ST->hasMVEIntegerOps())
1548       return 0;
1549     break;
1550   case Intrinsic::sadd_sat:
1551   case Intrinsic::ssub_sat:
1552   case Intrinsic::uadd_sat:
1553   case Intrinsic::usub_sat: {
1554     if (!ST->hasMVEIntegerOps())
1555       break;
1556     // Get the Return type, either directly or from ICA.ReturnType and ICA.VF.
1557     Type *VT = ICA.getReturnType();
1558     if (!VT->isVectorTy() && !ICA.getVectorFactor().isScalar())
1559       VT = VectorType::get(VT, ICA.getVectorFactor());
1560 
1561     std::pair<int, MVT> LT =
1562         TLI->getTypeLegalizationCost(DL, VT);
1563     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1564         LT.second == MVT::v16i8) {
1565       // This is a base cost of 1 for the vadd, plus 3 extra shifts if we
1566       // need to extend the type, as it uses shr(qadd(shl, shl)).
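      // For example (assuming v4i8 is promoted to v4i32 here), a <4 x i8>
      // saturating add would be modelled as 4 instructions (the qadd plus
      // shl/shl/shr), while a <16 x i8> one matches its legal type and is
      // modelled as 1.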
1567       unsigned Instrs = LT.second.getScalarSizeInBits() ==
1568                                 ICA.getReturnType()->getScalarSizeInBits()
1569                             ? 1
1570                             : 4;
1571       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1572     }
1573     break;
1574   }
1575   }
1576 
1577   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1578 }
1579 
1580 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1581   if (!F->isIntrinsic())
1582     return BaseT::isLoweredToCall(F);
1583 
1584   // Assume all Arm-specific intrinsics map to an instruction.
1585   if (F->getName().startswith("llvm.arm"))
1586     return false;
1587 
1588   switch (F->getIntrinsicID()) {
1589   default: break;
1590   case Intrinsic::powi:
1591   case Intrinsic::sin:
1592   case Intrinsic::cos:
1593   case Intrinsic::pow:
1594   case Intrinsic::log:
1595   case Intrinsic::log10:
1596   case Intrinsic::log2:
1597   case Intrinsic::exp:
1598   case Intrinsic::exp2:
1599     return true;
1600   case Intrinsic::sqrt:
1601   case Intrinsic::fabs:
1602   case Intrinsic::copysign:
1603   case Intrinsic::floor:
1604   case Intrinsic::ceil:
1605   case Intrinsic::trunc:
1606   case Intrinsic::rint:
1607   case Intrinsic::nearbyint:
1608   case Intrinsic::round:
1609   case Intrinsic::canonicalize:
1610   case Intrinsic::lround:
1611   case Intrinsic::llround:
1612   case Intrinsic::lrint:
1613   case Intrinsic::llrint:
1614     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1615       return true;
1616     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1617       return true;
1618     // Some operations can be handled by vector instructions; assume
1619     // unsupported vectors will be expanded into supported scalar ones.
1620     // TODO Handle scalar operations properly.
1621     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1622   case Intrinsic::masked_store:
1623   case Intrinsic::masked_load:
1624   case Intrinsic::masked_gather:
1625   case Intrinsic::masked_scatter:
1626     return !ST->hasMVEIntegerOps();
1627   case Intrinsic::sadd_with_overflow:
1628   case Intrinsic::uadd_with_overflow:
1629   case Intrinsic::ssub_with_overflow:
1630   case Intrinsic::usub_with_overflow:
1631   case Intrinsic::sadd_sat:
1632   case Intrinsic::uadd_sat:
1633   case Intrinsic::ssub_sat:
1634   case Intrinsic::usub_sat:
1635     return false;
1636   }
1637 
1638   return BaseT::isLoweredToCall(F);
1639 }
1640 
1641 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1642   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1643   EVT VT = TLI->getValueType(DL, I.getType(), true);
1644   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1645     return true;
1646 
1647   // Check if an intrinsic will be lowered to a call and assume that any
1648   // other CallInst will generate a bl.
1649   if (auto *Call = dyn_cast<CallInst>(&I)) {
1650     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
1651       switch(II->getIntrinsicID()) {
1652         case Intrinsic::memcpy:
1653         case Intrinsic::memset:
1654         case Intrinsic::memmove:
1655           return getNumMemOps(II) == -1;
1656         default:
1657           if (const Function *F = Call->getCalledFunction())
1658             return isLoweredToCall(F);
1659       }
1660     }
1661     return true;
1662   }
1663 
1664   // FPv5 provides conversions between integer, double-precision,
1665   // single-precision, and half-precision formats.
1666   switch (I.getOpcode()) {
1667   default:
1668     break;
1669   case Instruction::FPToSI:
1670   case Instruction::FPToUI:
1671   case Instruction::SIToFP:
1672   case Instruction::UIToFP:
1673   case Instruction::FPTrunc:
1674   case Instruction::FPExt:
1675     return !ST->hasFPARMv8Base();
1676   }
1677 
1678   // FIXME: Unfortunately the approach of checking the Operation Action does
1679   // not catch all cases of Legalization that use library calls. Our
1680   // Legalization step categorizes some transformations into library calls as
1681   // Custom, Expand or even Legal when doing type legalization. So for now
1682   // we have to special-case, for instance, the SDIV of 64-bit integers and the
1683   // use of floating point emulation.
1684   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1685     switch (ISD) {
1686     default:
1687       break;
1688     case ISD::SDIV:
1689     case ISD::UDIV:
1690     case ISD::SREM:
1691     case ISD::UREM:
1692     case ISD::SDIVREM:
1693     case ISD::UDIVREM:
1694       return true;
1695     }
1696   }
1697 
1698   // Assume all other non-float operations are supported.
1699   if (!VT.isFloatingPoint())
1700     return false;
1701 
1702   // We'll need a library call to handle most floats when using soft-float.
1703   if (TLI->useSoftFloat()) {
1704     switch (I.getOpcode()) {
1705     default:
1706       return true;
1707     case Instruction::Alloca:
1708     case Instruction::Load:
1709     case Instruction::Store:
1710     case Instruction::Select:
1711     case Instruction::PHI:
1712       return false;
1713     }
1714   }
1715 
1716   // We'll need a libcall to perform double precision operations on a single
1717   // precision only FPU.
1718   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1719     return true;
1720 
1721   // Likewise for half precision arithmetic.
1722   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1723     return true;
1724 
1725   return false;
1726 }
1727 
1728 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1729                                           AssumptionCache &AC,
1730                                           TargetLibraryInfo *LibInfo,
1731                                           HardwareLoopInfo &HWLoopInfo) {
1732   // Low-overhead branches are only supported in the 'low-overhead branch'
1733   // extension of v8.1-m.
1734   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1735     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1736     return false;
1737   }
1738 
1739   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1740     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1741     return false;
1742   }
1743 
1744   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1745   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1746     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1747     return false;
1748   }
1749 
1750   const SCEV *TripCountSCEV =
1751     SE.getAddExpr(BackedgeTakenCount,
1752                   SE.getOne(BackedgeTakenCount->getType()));
1753 
1754   // We need to store the trip count in LR, a 32-bit register.
1755   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1756     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1757     return false;
1758   }
1759 
1760   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1761   // point in generating a hardware loop if that's going to happen.
1762 
1763   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1764     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1765       switch (Call->getIntrinsicID()) {
1766       default:
1767         break;
1768       case Intrinsic::start_loop_iterations:
1769       case Intrinsic::test_set_loop_iterations:
1770       case Intrinsic::loop_decrement:
1771       case Intrinsic::loop_decrement_reg:
1772         return true;
1773       }
1774     }
1775     return false;
1776   };
1777 
1778   // Scan the instructions to see if there's any that we know will turn into a
1779   // call or if this loop is already a low-overhead loop or will become a tail
1780   // predicated loop.
1781   bool IsTailPredLoop = false;
1782   auto ScanLoop = [&](Loop *L) {
1783     for (auto *BB : L->getBlocks()) {
1784       for (auto &I : *BB) {
1785         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
1786             isa<InlineAsm>(I)) {
1787           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1788           return false;
1789         }
1790         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1791           IsTailPredLoop |=
1792               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
1793               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
1794               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
1795               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
1796               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
1797       }
1798     }
1799     return true;
1800   };
1801 
1802   // Visit inner loops.
1803   for (auto Inner : *L)
1804     if (!ScanLoop(Inner))
1805       return false;
1806 
1807   if (!ScanLoop(L))
1808     return false;
1809 
1810   // TODO: Check whether the trip count calculation is expensive. If L is the
1811   // inner loop but we know it has a low trip count, calculating that trip
1812   // count (in the parent loop) may be detrimental.
1813 
1814   LLVMContext &C = L->getHeader()->getContext();
1815   HWLoopInfo.CounterInReg = true;
1816   HWLoopInfo.IsNestingLegal = false;
1817   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
1818   HWLoopInfo.CountType = Type::getInt32Ty(C);
1819   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1820   return true;
1821 }
1822 
1823 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1824   // We don't allow icmps, and because we only look at single-block loops,
1825   // we simply count the icmps, i.e. there should only be 1 for the backedge.
1826   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1827     return false;
1828 
1829   if (isa<FCmpInst>(&I))
1830     return false;
1831 
1832   // We could allow extending/narrowing FP loads/stores, but codegen is
1833   // too inefficient so reject this for now.
1834   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1835     return false;
1836 
1837   // Extends have to be extending-loads
1838   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1839     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1840       return false;
1841 
1842   // Truncs have to be narrowing-stores
1843   if (isa<TruncInst>(&I))
1844     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1845       return false;
1846 
1847   return true;
1848 }
1849 
1850 // To set up a tail-predicated loop, we need to know the total number of
1851 // elements processed by that loop. Thus, we need to determine the element
1852 // size and:
1853 // 1) it should be uniform for all operations in the vector loop, so we
1854 //    e.g. don't want any widening/narrowing operations.
1855 // 2) it should be smaller than i64s because we don't have vector operations
1856 //    that work on i64s.
1857 // 3) we don't want elements to be reversed or shuffled, to make sure the
1858 //    tail-predication masks/predicates the right lanes.
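// For example, a loop that widens <8 x i16> data to <8 x i32> anywhere other
// than in an extending load violates (1), and any i64 element violates (2).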
1859 //
1860 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1861                                  const DataLayout &DL,
1862                                  const LoopAccessInfo *LAI) {
1863   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1864 
1865   // If there are live-out values, it is probably a reduction. We can predicate
1866   // most reduction operations freely under MVE using a combination of
1867   // prefer-predicated-reduction-select and inloop reductions. We limit this to
1868   // floating point and integer reductions, but don't check for operators
1869   // specifically here. If the value ends up not being a reduction (and so the
1870   // vectorizer cannot tailfold the loop), we should fall back to standard
1871   // vectorization automatically.
1872   SmallVector<Instruction *, 8> LiveOuts;
1873   LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1874   bool ReductionsDisabled =
1875       EnableTailPredication == TailPredication::EnabledNoReductions ||
1876       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1877 
1878   for (auto *I : LiveOuts) {
1879     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
1880         !I->getType()->isHalfTy()) {
1881       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
1882                            "live-out value\n");
1883       return false;
1884     }
1885     if (ReductionsDisabled) {
1886       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
1887       return false;
1888     }
1889   }
1890 
1891   // Next, check that all instructions can be tail-predicated.
1892   PredicatedScalarEvolution PSE = LAI->getPSE();
1893   SmallVector<Instruction *, 16> LoadStores;
1894   int ICmpCount = 0;
1895 
1896   for (BasicBlock *BB : L->blocks()) {
1897     for (Instruction &I : BB->instructionsWithoutDebug()) {
1898       if (isa<PHINode>(&I))
1899         continue;
1900       if (!canTailPredicateInstruction(I, ICmpCount)) {
1901         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1902         return false;
1903       }
1904 
1905       Type *T = I.getType();
1906       if (T->isPointerTy())
1907         T = T->getPointerElementType();
1908 
1909       if (T->getScalarSizeInBits() > 32) {
1910         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1911         return false;
1912       }
1913       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1914         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1915         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1916         if (NextStride == 1) {
1917           // TODO: for now only allow consecutive strides of 1. We could support
1918           // other strides as long as they are uniform, but let's keep it simple
1919           // for now.
1920           continue;
1921         } else if (NextStride == -1 ||
1922                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
1923                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
1924           LLVM_DEBUG(dbgs()
1925                      << "Consecutive strides of 2 found, vld2/vst2 can't "
1926                         "be tail-predicated.\n");
1927           return false;
1928           // TODO: don't tail predicate if there is a reversed load?
1929         } else if (EnableMaskedGatherScatters) {
1930           // Gather/scatters do allow loading from arbitrary strides, at
1931           // least if they are loop invariant.
1932           // TODO: Loop variant strides should in theory work, too, but
1933           // this requires further testing.
1934           const SCEV *PtrScev =
1935               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
1936           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
1937             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1938             if (PSE.getSE()->isLoopInvariant(Step, L))
1939               continue;
1940           }
1941         }
1942         LLVM_DEBUG(dbgs() << "Bad stride found, can't "
1943                              "tail-predicate.\n");
1944         return false;
1945       }
1946     }
1947   }
1948 
1949   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1950   return true;
1951 }
1952 
1953 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1954                                              ScalarEvolution &SE,
1955                                              AssumptionCache &AC,
1956                                              TargetLibraryInfo *TLI,
1957                                              DominatorTree *DT,
1958                                              const LoopAccessInfo *LAI) {
1959   if (!EnableTailPredication) {
1960     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1961     return false;
1962   }
1963 
1964   // Creating a predicated vector loop is the first step for generating a
1965   // tail-predicated hardware loop, for which we need the MVE masked
1966   // load/store instructions:
1967   if (!ST->hasMVEIntegerOps())
1968     return false;
1969 
1970   // For now, restrict this to single block loops.
1971   if (L->getNumBlocks() > 1) {
1972     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1973                          "loop.\n");
1974     return false;
1975   }
1976 
1977   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
1978 
1979   HardwareLoopInfo HWLoopInfo(L);
1980   if (!HWLoopInfo.canAnalyze(*LI)) {
1981     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1982                          "analyzable.\n");
1983     return false;
1984   }
1985 
1986   // This checks if we have the low-overhead branch architecture
1987   // extension, and if we will create a hardware-loop:
1988   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1989     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1990                          "profitable.\n");
1991     return false;
1992   }
1993 
1994   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1995     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1996                          "a candidate.\n");
1997     return false;
1998   }
1999 
2000   return canTailPredicateLoop(L, LI, SE, DL, LAI);
2001 }
2002 
2003 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2004   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2005     return false;
2006 
2007   // Intrinsic @llvm.get.active.lane.mask is supported.
2008   // It is used in the MVETailPredication pass, which requires the number of
2009   // elements processed by this vector loop to set up the tail-predicated
2010   // loop.
2011   return true;
2012 }
2013 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2014                                          TTI::UnrollingPreferences &UP) {
2015   // Only currently enable these preferences for M-Class cores.
2016   if (!ST->isMClass())
2017     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
2018 
2019   // Disable loop unrolling for Oz and Os.
2020   UP.OptSizeThreshold = 0;
2021   UP.PartialOptSizeThreshold = 0;
2022   if (L->getHeader()->getParent()->hasOptSize())
2023     return;
2024 
2025   // Only enable on Thumb-2 targets.
2026   if (!ST->isThumb2())
2027     return;
2028 
2029   SmallVector<BasicBlock*, 4> ExitingBlocks;
2030   L->getExitingBlocks(ExitingBlocks);
2031   LLVM_DEBUG(dbgs() << "Loop has:\n"
2032                     << "Blocks: " << L->getNumBlocks() << "\n"
2033                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2034 
2035   // Allow at most one exit other than the latch. This acts as an early exit
2036   // as it mirrors the profitability calculation of the runtime unroller.
2037   if (ExitingBlocks.size() > 2)
2038     return;
2039 
2040   // Limit the CFG of the loop body for targets with a branch predictor.
2041   // Allowing 4 blocks permits if-then-else diamonds in the body.
2042   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2043     return;
2044 
2045   // Don't unroll vectorized loops, including the remainder loop
2046   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2047     return;
2048 
2049   // Scan the loop: don't unroll loops with calls as this could prevent
2050   // inlining.
2051   unsigned Cost = 0;
2052   for (auto *BB : L->getBlocks()) {
2053     for (auto &I : *BB) {
2054       // Don't unroll vectorised loops. MVE does not benefit from unrolling as
2055       // much as scalar code does.
2056       if (I.getType()->isVectorTy())
2057         return;
2058 
2059       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2060         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2061           if (!isLoweredToCall(F))
2062             continue;
2063         }
2064         return;
2065       }
2066 
2067       SmallVector<const Value*, 4> Operands(I.operand_values());
2068       Cost +=
2069         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
2070     }
2071   }
2072 
2073   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2074 
2075   UP.Partial = true;
2076   UP.Runtime = true;
2077   UP.UpperBound = true;
2078   UP.UnrollRemainder = true;
2079   UP.DefaultUnrollRuntimeCount = 4;
2080   UP.UnrollAndJam = true;
2081   UP.UnrollAndJamInnerLoopThreshold = 60;
2082 
2083   // Forcing the unrolling of small loops can be very useful because of the
2084   // branch-taken cost of the backedge.
2085   if (Cost < 12)
2086     UP.Force = true;
2087 }
2088 
2089 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2090                                        TTI::PeelingPreferences &PP) {
2091   BaseT::getPeelingPreferences(L, SE, PP);
2092 }
2093 
2094 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2095                                        TTI::ReductionFlags Flags) const {
2096   if (!ST->hasMVEIntegerOps())
2097     return false;
2098 
2099   unsigned ScalarBits = Ty->getScalarSizeInBits();
2100   switch (Opcode) {
2101   case Instruction::Add:
2102     return ScalarBits <= 64;
2103   default:
2104     return false;
2105   }
2106 }
2107 
2108 bool ARMTTIImpl::preferPredicatedReductionSelect(
2109     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2110   if (!ST->hasMVEIntegerOps())
2111     return false;
2112   return true;
2113 }
2114