xref: /llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (revision cd6de0e8de4a5fd558580be4b1a07116914fc8ed)
1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 static cl::opt<bool>
54     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55                   cl::desc("Enable the generation of WLS loops"));
56 
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58 
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60 
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62 
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67                                InstCombiner::BuilderTy &Builder) {
68   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69 
70   if (!IntrAlign)
71     return nullptr;
72 
73   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74                            ? MemAlign
75                            : IntrAlign->getLimitedValue();
76 
77   if (!isPowerOf2_32(Alignment))
78     return nullptr;
79 
80   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81                                           PointerType::get(II.getType(), 0));
82   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84 
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86                                      const Function *Callee) const {
87   const TargetMachine &TM = getTLI()->getTargetMachine();
88   const FeatureBitset &CallerBits =
89       TM.getSubtargetImpl(*Caller)->getFeatureBits();
90   const FeatureBitset &CalleeBits =
91       TM.getSubtargetImpl(*Callee)->getFeatureBits();
92 
93   // To inline a callee, all features not in the allowed list must match exactly.
94   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95                     (CalleeBits & ~InlineFeaturesAllowed);
96   // For features in the allowed list, the callee's features must be a subset of
97   // the callers'.
98   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99                      (CalleeBits & InlineFeaturesAllowed);
100   return MatchExact && MatchSubset;
101 }
102 
103 TTI::AddressingModeKind
104 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
105                                        ScalarEvolution *SE) const {
106   if (ST->hasMVEIntegerOps())
107     return TTI::AMK_PostIndexed;
108 
109   if (L->getHeader()->getParent()->hasOptSize())
110     return TTI::AMK_None;
111 
112   if (ST->isMClass() && ST->isThumb2() &&
113       L->getNumBlocks() == 1)
114     return TTI::AMK_PreIndexed;
115 
116   return TTI::AMK_None;
117 }
118 
119 Optional<Instruction *>
120 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
121   using namespace PatternMatch;
122   Intrinsic::ID IID = II.getIntrinsicID();
123   switch (IID) {
124   default:
125     break;
126   case Intrinsic::arm_neon_vld1: {
127     Align MemAlign =
128         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
129                           &IC.getAssumptionCache(), &IC.getDominatorTree());
130     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
131       return IC.replaceInstUsesWith(II, V);
132     }
133     break;
134   }
135 
136   case Intrinsic::arm_neon_vld2:
137   case Intrinsic::arm_neon_vld3:
138   case Intrinsic::arm_neon_vld4:
139   case Intrinsic::arm_neon_vld2lane:
140   case Intrinsic::arm_neon_vld3lane:
141   case Intrinsic::arm_neon_vld4lane:
142   case Intrinsic::arm_neon_vst1:
143   case Intrinsic::arm_neon_vst2:
144   case Intrinsic::arm_neon_vst3:
145   case Intrinsic::arm_neon_vst4:
146   case Intrinsic::arm_neon_vst2lane:
147   case Intrinsic::arm_neon_vst3lane:
148   case Intrinsic::arm_neon_vst4lane: {
149     Align MemAlign =
150         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
151                           &IC.getAssumptionCache(), &IC.getDominatorTree());
152     unsigned AlignArg = II.getNumArgOperands() - 1;
153     Value *AlignArgOp = II.getArgOperand(AlignArg);
154     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
155     if (Align && *Align < MemAlign) {
156       return IC.replaceOperand(
157           II, AlignArg,
158           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
159                            false));
160     }
161     break;
162   }
163 
164   case Intrinsic::arm_mve_pred_i2v: {
165     Value *Arg = II.getArgOperand(0);
166     Value *ArgArg;
167     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
168                        PatternMatch::m_Value(ArgArg))) &&
169         II.getType() == ArgArg->getType()) {
170       return IC.replaceInstUsesWith(II, ArgArg);
171     }
172     Constant *XorMask;
173     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
174                              PatternMatch::m_Value(ArgArg)),
175                          PatternMatch::m_Constant(XorMask))) &&
176         II.getType() == ArgArg->getType()) {
177       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
178         if (CI->getValue().trunc(16).isAllOnesValue()) {
179           auto TrueVector = IC.Builder.CreateVectorSplat(
180               cast<FixedVectorType>(II.getType())->getNumElements(),
181               IC.Builder.getTrue());
182           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
183         }
184       }
185     }
186     KnownBits ScalarKnown(32);
187     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
188                                 ScalarKnown, 0)) {
189       return &II;
190     }
191     break;
192   }
193   case Intrinsic::arm_mve_pred_v2i: {
194     Value *Arg = II.getArgOperand(0);
195     Value *ArgArg;
196     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
197                        PatternMatch::m_Value(ArgArg)))) {
198       return IC.replaceInstUsesWith(II, ArgArg);
199     }
200     if (!II.getMetadata(LLVMContext::MD_range)) {
201       Type *IntTy32 = Type::getInt32Ty(II.getContext());
202       Metadata *M[] = {
203           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
204           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
205       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
206       return &II;
207     }
208     break;
209   }
210   case Intrinsic::arm_mve_vadc:
211   case Intrinsic::arm_mve_vadc_predicated: {
212     unsigned CarryOp =
213         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
214     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
215            "Bad type for intrinsic!");
216 
217     KnownBits CarryKnown(32);
218     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
219                                 CarryKnown)) {
220       return &II;
221     }
222     break;
223   }
224   case Intrinsic::arm_mve_vmldava: {
225     Instruction *I = cast<Instruction>(&II);
226     if (I->hasOneUse()) {
227       auto *User = cast<Instruction>(*I->user_begin());
228       Value *OpZ;
229       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
230           match(I->getOperand(3), m_Zero())) {
231         Value *OpX = I->getOperand(4);
232         Value *OpY = I->getOperand(5);
233         Type *OpTy = OpX->getType();
234 
235         IC.Builder.SetInsertPoint(User);
236         Value *V =
237             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
238                                        {I->getOperand(0), I->getOperand(1),
239                                         I->getOperand(2), OpZ, OpX, OpY});
240 
241         IC.replaceInstUsesWith(*User, V);
242         return IC.eraseInstFromFunction(*User);
243       }
244     }
245     return None;
246   }
247   }
248   return None;
249 }
250 
251 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
252                               TTI::TargetCostKind CostKind) {
253   assert(Ty->isIntegerTy());
254 
255  unsigned Bits = Ty->getPrimitiveSizeInBits();
256  if (Bits == 0 || Imm.getActiveBits() >= 64)
257    return 4;
258 
259   int64_t SImmVal = Imm.getSExtValue();
260   uint64_t ZImmVal = Imm.getZExtValue();
261   if (!ST->isThumb()) {
262     if ((SImmVal >= 0 && SImmVal < 65536) ||
263         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
264         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
265       return 1;
266     return ST->hasV6T2Ops() ? 2 : 3;
267   }
268   if (ST->isThumb2()) {
269     if ((SImmVal >= 0 && SImmVal < 65536) ||
270         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
271         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
272       return 1;
273     return ST->hasV6T2Ops() ? 2 : 3;
274   }
275   // Thumb1, any i8 imm cost 1.
276   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
277     return 1;
278   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
279     return 2;
280   // Load from constantpool.
281   return 3;
282 }
283 
284 // Constants smaller than 256 fit in the immediate field of
285 // Thumb1 instructions so we return a zero cost and 1 otherwise.
286 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
287                                       const APInt &Imm, Type *Ty) {
288   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
289     return 0;
290 
291   return 1;
292 }
293 
294 // Checks whether Inst is part of a min(max()) or max(min()) pattern
295 // that will match to an SSAT instruction
296 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
297   Value *LHS, *RHS;
298   ConstantInt *C;
299   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
300 
301   if (InstSPF == SPF_SMAX &&
302       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
303       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
304 
305     auto isSSatMin = [&](Value *MinInst) {
306       if (isa<SelectInst>(MinInst)) {
307         Value *MinLHS, *MinRHS;
308         ConstantInt *MinC;
309         SelectPatternFlavor MinSPF =
310             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
311         if (MinSPF == SPF_SMIN &&
312             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
313             MinC->getValue() == ((-Imm) - 1))
314           return true;
315       }
316       return false;
317     };
318 
319     if (isSSatMin(Inst->getOperand(1)) ||
320         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
321                                isSSatMin(*(++Inst->user_begin())))))
322       return true;
323   }
324   return false;
325 }
326 
327 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
328                                   const APInt &Imm, Type *Ty,
329                                   TTI::TargetCostKind CostKind,
330                                   Instruction *Inst) {
331   // Division by a constant can be turned into multiplication, but only if we
332   // know it's constant. So it's not so much that the immediate is cheap (it's
333   // not), but that the alternative is worse.
334   // FIXME: this is probably unneeded with GlobalISel.
335   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
336        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
337       Idx == 1)
338     return 0;
339 
340   if (Opcode == Instruction::And) {
341     // UXTB/UXTH
342     if (Imm == 255 || Imm == 65535)
343       return 0;
344     // Conversion to BIC is free, and means we can use ~Imm instead.
345     return std::min(getIntImmCost(Imm, Ty, CostKind),
346                     getIntImmCost(~Imm, Ty, CostKind));
347   }
348 
349   if (Opcode == Instruction::Add)
350     // Conversion to SUB is free, and means we can use -Imm instead.
351     return std::min(getIntImmCost(Imm, Ty, CostKind),
352                     getIntImmCost(-Imm, Ty, CostKind));
353 
354   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
355       Ty->getIntegerBitWidth() == 32) {
356     int64_t NegImm = -Imm.getSExtValue();
357     if (ST->isThumb2() && NegImm < 1<<12)
358       // icmp X, #-C -> cmn X, #C
359       return 0;
360     if (ST->isThumb() && NegImm < 1<<8)
361       // icmp X, #-C -> adds X, #C
362       return 0;
363   }
364 
365   // xor a, -1 can always be folded to MVN
366   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
367     return 0;
368 
369   // Ensures negative constant of min(max()) or max(min()) patterns that
370   // match to SSAT instructions don't get hoisted
371   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
372       Ty->getIntegerBitWidth() <= 32) {
373     if (isSSATMinMaxPattern(Inst, Imm) ||
374         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
375          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
376       return 0;
377   }
378 
379   return getIntImmCost(Imm, Ty, CostKind);
380 }
381 
382 int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
383   if (CostKind == TTI::TCK_RecipThroughput &&
384       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
385     // FIXME: The vectorizer is highly sensistive to the cost of these
386     // instructions, which suggests that it may be using the costs incorrectly.
387     // But, for now, just make them free to avoid performance regressions for
388     // vector targets.
389     return 0;
390   }
391   return BaseT::getCFInstrCost(Opcode, CostKind);
392 }
393 
394 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
395                                  TTI::CastContextHint CCH,
396                                  TTI::TargetCostKind CostKind,
397                                  const Instruction *I) {
398   int ISD = TLI->InstructionOpcodeToISD(Opcode);
399   assert(ISD && "Invalid opcode");
400 
401   // TODO: Allow non-throughput costs that aren't binary.
402   auto AdjustCost = [&CostKind](int Cost) {
403     if (CostKind != TTI::TCK_RecipThroughput)
404       return Cost == 0 ? 0 : 1;
405     return Cost;
406   };
407   auto IsLegalFPType = [this](EVT VT) {
408     EVT EltVT = VT.getScalarType();
409     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
410             (EltVT == MVT::f64 && ST->hasFP64()) ||
411             (EltVT == MVT::f16 && ST->hasFullFP16());
412   };
413 
414   EVT SrcTy = TLI->getValueType(DL, Src);
415   EVT DstTy = TLI->getValueType(DL, Dst);
416 
417   if (!SrcTy.isSimple() || !DstTy.isSimple())
418     return AdjustCost(
419         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
420 
421   // Extending masked load/Truncating masked stores is expensive because we
422   // currently don't split them. This means that we'll likely end up
423   // loading/storing each element individually (hence the high cost).
424   if ((ST->hasMVEIntegerOps() &&
425        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
426         Opcode == Instruction::SExt)) ||
427       (ST->hasMVEFloatOps() &&
428        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
429        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
430     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
431       return 2 * DstTy.getVectorNumElements() *
432              ST->getMVEVectorCostFactor(CostKind);
433 
434   // The extend of other kinds of load is free
435   if (CCH == TTI::CastContextHint::Normal ||
436       CCH == TTI::CastContextHint::Masked) {
437     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
438         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
439         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
440         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
441         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
442         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
443         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
444         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
445         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
446         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
447         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
448         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
449         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
450     };
451     if (const auto *Entry = ConvertCostTableLookup(
452             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
453       return AdjustCost(Entry->Cost);
454 
455     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
456         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
457         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
458         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
459         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
460         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
461         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
462         // The following extend from a legal type to an illegal type, so need to
463         // split the load. This introduced an extra load operation, but the
464         // extend is still "free".
465         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
466         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
467         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
468         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
469         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
470         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
471     };
472     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
473       if (const auto *Entry =
474               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
475                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
476         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
477     }
478 
479     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
480         // FPExtends are similar but also require the VCVT instructions.
481         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
482         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
483     };
484     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
485       if (const auto *Entry =
486               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
487                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
488         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
489     }
490 
491     // The truncate of a store is free. This is the mirror of extends above.
492     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
493         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
494         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
495         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
496         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
497         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
498         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
499         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
500     };
501     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
502       if (const auto *Entry =
503               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
504                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
505         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
506     }
507 
508     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
509         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
510         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
511     };
512     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
513       if (const auto *Entry =
514               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
515                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
516         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
517     }
518   }
519 
520   // NEON vector operations that can extend their inputs.
521   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
522       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
523     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
524       // vaddl
525       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
526       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
527       // vsubl
528       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
529       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
530       // vmull
531       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
532       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
533       // vshll
534       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
535       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
536     };
537 
538     auto *User = cast<Instruction>(*I->user_begin());
539     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
540     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
541                                              DstTy.getSimpleVT(),
542                                              SrcTy.getSimpleVT())) {
543       return AdjustCost(Entry->Cost);
544     }
545   }
546 
547   // Single to/from double precision conversions.
548   if (Src->isVectorTy() && ST->hasNEON() &&
549       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
550         DstTy.getScalarType() == MVT::f32) ||
551        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
552         DstTy.getScalarType() == MVT::f64))) {
553     static const CostTblEntry NEONFltDblTbl[] = {
554         // Vector fptrunc/fpext conversions.
555         {ISD::FP_ROUND, MVT::v2f64, 2},
556         {ISD::FP_EXTEND, MVT::v2f32, 2},
557         {ISD::FP_EXTEND, MVT::v4f32, 4}};
558 
559     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
560     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
561       return AdjustCost(LT.first * Entry->Cost);
562   }
563 
564   // Some arithmetic, load and store operations have specific instructions
565   // to cast up/down their types automatically at no extra cost.
566   // TODO: Get these tables to know at least what the related operations are.
567   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
568     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
569     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
570     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
571     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
572     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
573     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
574 
575     // The number of vmovl instructions for the extension.
576     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
577     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
578     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
579     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
580     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
581     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
582     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
583     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
584     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
585     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
586     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
587     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
588     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
589     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
590     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
591     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
592     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
593     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
594 
595     // Operations that we legalize using splitting.
596     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
597     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
598 
599     // Vector float <-> i32 conversions.
600     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
601     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
602 
603     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
604     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
605     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
606     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
607     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
608     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
609     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
610     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
611     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
612     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
613     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
614     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
615     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
616     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
617     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
618     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
619     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
620     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
621     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
622     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
623 
624     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
625     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
626     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
627     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
628     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
629     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
630 
631     // Vector double <-> i32 conversions.
632     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
633     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
634 
635     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
636     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
637     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
638     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
639     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
640     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
641 
642     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
643     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
644     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
645     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
646     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
647     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
648   };
649 
650   if (SrcTy.isVector() && ST->hasNEON()) {
651     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
652                                                    DstTy.getSimpleVT(),
653                                                    SrcTy.getSimpleVT()))
654       return AdjustCost(Entry->Cost);
655   }
656 
657   // Scalar float to integer conversions.
658   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
659     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
660     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
661     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
662     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
663     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
664     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
665     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
666     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
667     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
668     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
669     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
670     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
671     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
672     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
673     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
674     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
675     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
676     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
677     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
678     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
679   };
680   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
681     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
682                                                    DstTy.getSimpleVT(),
683                                                    SrcTy.getSimpleVT()))
684       return AdjustCost(Entry->Cost);
685   }
686 
687   // Scalar integer to float conversions.
688   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
689     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
690     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
691     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
692     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
693     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
694     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
695     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
696     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
697     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
698     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
699     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
700     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
701     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
702     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
703     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
704     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
705     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
706     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
707     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
708     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
709   };
710 
711   if (SrcTy.isInteger() && ST->hasNEON()) {
712     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
713                                                    ISD, DstTy.getSimpleVT(),
714                                                    SrcTy.getSimpleVT()))
715       return AdjustCost(Entry->Cost);
716   }
717 
718   // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
719   // instruction, i8->i32 is two. i64 zexts are an VAND with a constant, sext
720   // are linearised so take more.
721   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
722     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
723     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
724     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
725     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
726     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
727     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
728     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
729     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
730     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
731     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
732     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
733     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
734   };
735 
736   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
737     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
738                                                    ISD, DstTy.getSimpleVT(),
739                                                    SrcTy.getSimpleVT()))
740       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
741   }
742 
743   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
744     // As general rule, fp converts that were not matched above are scalarized
745     // and cost 1 vcvt for each lane, so long as the instruction is available.
746     // If not it will become a series of function calls.
747     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
748     int Lanes = 1;
749     if (SrcTy.isFixedLengthVector())
750       Lanes = SrcTy.getVectorNumElements();
751 
752     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
753       return Lanes;
754     else
755       return Lanes * CallCost;
756   }
757 
758   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
759       SrcTy.isFixedLengthVector()) {
760     // Treat a truncate with larger than legal source (128bits for MVE) as
761     // expensive, 2 instructions per lane.
762     if ((SrcTy.getScalarType() == MVT::i8 ||
763          SrcTy.getScalarType() == MVT::i16 ||
764          SrcTy.getScalarType() == MVT::i32) &&
765         SrcTy.getSizeInBits() > 128 &&
766         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
767       return SrcTy.getVectorNumElements() * 2;
768   }
769 
770   // Scalar integer conversion costs.
771   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
772     // i16 -> i64 requires two dependent operations.
773     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
774 
775     // Truncates on i64 are assumed to be free.
776     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
777     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
778     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
779     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
780   };
781 
782   if (SrcTy.isInteger()) {
783     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
784                                                    DstTy.getSimpleVT(),
785                                                    SrcTy.getSimpleVT()))
786       return AdjustCost(Entry->Cost);
787   }
788 
789   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
790                      ? ST->getMVEVectorCostFactor(CostKind)
791                      : 1;
792   return AdjustCost(
793       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
794 }
795 
796 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
797                                    unsigned Index) {
798   // Penalize inserting into an D-subregister. We end up with a three times
799   // lower estimated throughput on swift.
800   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
801       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
802     return 3;
803 
804   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
805                         Opcode == Instruction::ExtractElement)) {
806     // Cross-class copies are expensive on many microarchitectures,
807     // so assume they are expensive by default.
808     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
809       return 3;
810 
811     // Even if it's not a cross class copy, this likely leads to mixing
812     // of NEON and VFP code and should be therefore penalized.
813     if (ValTy->isVectorTy() &&
814         ValTy->getScalarSizeInBits() <= 32)
815       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
816   }
817 
818   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
819                                  Opcode == Instruction::ExtractElement)) {
820     // We say MVE moves costs at least the MVEVectorCostFactor, even though
821     // they are scalar instructions. This helps prevent mixing scalar and
822     // vector, to prevent vectorising where we end up just scalarising the
823     // result anyway.
824     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
825                     ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)) *
826            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
827   }
828 
829   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
830 }
831 
832 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
833                                    CmpInst::Predicate VecPred,
834                                    TTI::TargetCostKind CostKind,
835                                    const Instruction *I) {
836   int ISD = TLI->InstructionOpcodeToISD(Opcode);
837 
838   // Thumb scalar code size cost for select.
839   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
840       ST->isThumb() && !ValTy->isVectorTy()) {
841     // Assume expensive structs.
842     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
843       return TTI::TCC_Expensive;
844 
845     // Select costs can vary because they:
846     // - may require one or more conditional mov (including an IT),
847     // - can't operate directly on immediates,
848     // - require live flags, which we can't copy around easily.
849     int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
850 
851     // Possible IT instruction for Thumb2, or more for Thumb1.
852     ++Cost;
853 
854     // i1 values may need rematerialising by using mov immediates and/or
855     // flag setting instructions.
856     if (ValTy->isIntegerTy(1))
857       ++Cost;
858 
859     return Cost;
860   }
861 
862   // On NEON a vector select gets lowered to vbsl.
863   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
864     // Lowering of some vector selects is currently far from perfect.
865     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
866       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
867       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
868       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
869     };
870 
871     EVT SelCondTy = TLI->getValueType(DL, CondTy);
872     EVT SelValTy = TLI->getValueType(DL, ValTy);
873     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
874       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
875                                                      SelCondTy.getSimpleVT(),
876                                                      SelValTy.getSimpleVT()))
877         return Entry->Cost;
878     }
879 
880     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
881     return LT.first;
882   }
883 
884   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
885   // for "multiple beats" potentially needed by MVE instructions.
886   int BaseCost = 1;
887   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
888     BaseCost = ST->getMVEVectorCostFactor(CostKind);
889 
890   return BaseCost *
891          BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
892 }
893 
894 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
895                                           const SCEV *Ptr) {
896   // Address computations in vectorized code with non-consecutive addresses will
897   // likely result in more instructions compared to scalar code where the
898   // computation can more often be merged into the index mode. The resulting
899   // extra micro-ops can significantly decrease throughput.
900   unsigned NumVectorInstToHideOverhead = 10;
901   int MaxMergeDistance = 64;
902 
903   if (ST->hasNEON()) {
904     if (Ty->isVectorTy() && SE &&
905         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
906       return NumVectorInstToHideOverhead;
907 
908     // In many cases the address computation is not merged into the instruction
909     // addressing mode.
910     return 1;
911   }
912   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
913 }
914 
915 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
916   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
917     // If a VCTP is part of a chain, it's already profitable and shouldn't be
918     // optimized, else LSR may block tail-predication.
919     switch (II->getIntrinsicID()) {
920     case Intrinsic::arm_mve_vctp8:
921     case Intrinsic::arm_mve_vctp16:
922     case Intrinsic::arm_mve_vctp32:
923     case Intrinsic::arm_mve_vctp64:
924       return true;
925     default:
926       break;
927     }
928   }
929   return false;
930 }
931 
932 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
933   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
934     return false;
935 
936   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
937     // Don't support v2i1 yet.
938     if (VecTy->getNumElements() == 2)
939       return false;
940 
941     // We don't support extending fp types.
942      unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
943     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
944       return false;
945   }
946 
947   unsigned EltWidth = DataTy->getScalarSizeInBits();
948   return (EltWidth == 32 && Alignment >= 4) ||
949          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
950 }
951 
952 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
953   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
954     return false;
955 
956   // This method is called in 2 places:
957   //  - from the vectorizer with a scalar type, in which case we need to get
958   //  this as good as we can with the limited info we have (and rely on the cost
959   //  model for the rest).
960   //  - from the masked intrinsic lowering pass with the actual vector type.
961   // For MVE, we have a custom lowering pass that will already have custom
962   // legalised any gathers that we can to MVE intrinsics, and want to expand all
963   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
964   // are here, we know we want to expand.
965   if (isa<VectorType>(Ty))
966     return false;
967 
968   unsigned EltWidth = Ty->getScalarSizeInBits();
969   return ((EltWidth == 32 && Alignment >= 4) ||
970           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
971 }
972 
973 /// Given a memcpy/memset/memmove instruction, return the number of memory
974 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
975 /// call is used.
976 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
977   MemOp MOp;
978   unsigned DstAddrSpace = ~0u;
979   unsigned SrcAddrSpace = ~0u;
980   const Function *F = I->getParent()->getParent();
981 
982   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
983     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
984     // If 'size' is not a constant, a library call will be generated.
985     if (!C)
986       return -1;
987 
988     const unsigned Size = C->getValue().getZExtValue();
989     const Align DstAlign = *MC->getDestAlign();
990     const Align SrcAlign = *MC->getSourceAlign();
991 
992     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
993                       /*IsVolatile*/ false);
994     DstAddrSpace = MC->getDestAddressSpace();
995     SrcAddrSpace = MC->getSourceAddressSpace();
996   }
997   else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
998     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
999     // If 'size' is not a constant, a library call will be generated.
1000     if (!C)
1001       return -1;
1002 
1003     const unsigned Size = C->getValue().getZExtValue();
1004     const Align DstAlign = *MS->getDestAlign();
1005 
1006     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1007                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1008     DstAddrSpace = MS->getDestAddressSpace();
1009   }
1010   else
1011     llvm_unreachable("Expected a memcpy/move or memset!");
1012 
1013   unsigned Limit, Factor = 2;
1014   switch(I->getIntrinsicID()) {
1015     case Intrinsic::memcpy:
1016       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1017       break;
1018     case Intrinsic::memmove:
1019       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1020       break;
1021     case Intrinsic::memset:
1022       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1023       Factor = 1;
1024       break;
1025     default:
1026       llvm_unreachable("Expected a memcpy/move or memset!");
1027   }
1028 
1029   // MemOps will be poplulated with a list of data types that needs to be
1030   // loaded and stored. That's why we multiply the number of elements by 2 to
1031   // get the cost for this memcpy.
1032   std::vector<EVT> MemOps;
1033   if (getTLI()->findOptimalMemOpLowering(
1034           MemOps, Limit, MOp, DstAddrSpace,
1035           SrcAddrSpace, F->getAttributes()))
1036     return MemOps.size() * Factor;
1037 
1038   // If we can't find an optimal memop lowering, return the default cost
1039   return -1;
1040 }
1041 
1042 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1043   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1044 
1045   // To model the cost of a library call, we assume 1 for the call, and
1046   // 3 for the argument setup.
1047   if (NumOps == -1)
1048     return 4;
1049   return NumOps;
1050 }
1051 
1052 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
1053                                int Index, VectorType *SubTp) {
1054   if (ST->hasNEON()) {
1055     if (Kind == TTI::SK_Broadcast) {
1056       static const CostTblEntry NEONDupTbl[] = {
1057           // VDUP handles these cases.
1058           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1059           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1060           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1061           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1062           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1063           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1064 
1065           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1066           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1067           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1068           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1069 
1070       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1071 
1072       if (const auto *Entry =
1073               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1074         return LT.first * Entry->Cost;
1075     }
1076     if (Kind == TTI::SK_Reverse) {
1077       static const CostTblEntry NEONShuffleTbl[] = {
1078           // Reverse shuffle cost one instruction if we are shuffling within a
1079           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1080           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1081           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1082           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1083           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1084           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1085           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1086 
1087           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1088           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1089           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1090           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1091 
1092       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1093 
1094       if (const auto *Entry =
1095               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1096         return LT.first * Entry->Cost;
1097     }
1098     if (Kind == TTI::SK_Select) {
1099       static const CostTblEntry NEONSelShuffleTbl[] = {
1100           // Select shuffle cost table for ARM. Cost is the number of
1101           // instructions
1102           // required to create the shuffled vector.
1103 
1104           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1105           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1106           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1107           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1108 
1109           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1110           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1111           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1112 
1113           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1114 
1115           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1116 
1117       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1118       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1119                                               ISD::VECTOR_SHUFFLE, LT.second))
1120         return LT.first * Entry->Cost;
1121     }
1122   }
1123   if (ST->hasMVEIntegerOps()) {
1124     if (Kind == TTI::SK_Broadcast) {
1125       static const CostTblEntry MVEDupTbl[] = {
1126           // VDUP handles these cases.
1127           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1128           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1129           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1130           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1131           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1132 
1133       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1134 
1135       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1136                                               LT.second))
1137         return LT.first * Entry->Cost *
1138                ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1139     }
1140   }
1141   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1142                      ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1143                      : 1;
1144   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
1145 }
1146 
1147 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
1148                                        TTI::TargetCostKind CostKind,
1149                                        TTI::OperandValueKind Op1Info,
1150                                        TTI::OperandValueKind Op2Info,
1151                                        TTI::OperandValueProperties Opd1PropInfo,
1152                                        TTI::OperandValueProperties Opd2PropInfo,
1153                                        ArrayRef<const Value *> Args,
1154                                        const Instruction *CxtI) {
1155   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1156   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1157     // Make operations on i1 relatively expensive as this often involves
1158     // combining predicates. AND and XOR should be easier to handle with IT
1159     // blocks.
1160     switch (ISDOpcode) {
1161     default:
1162       break;
1163     case ISD::AND:
1164     case ISD::XOR:
1165       return 2;
1166     case ISD::OR:
1167       return 3;
1168     }
1169   }
1170 
1171   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1172 
1173   if (ST->hasNEON()) {
1174     const unsigned FunctionCallDivCost = 20;
1175     const unsigned ReciprocalDivCost = 10;
1176     static const CostTblEntry CostTbl[] = {
1177       // Division.
1178       // These costs are somewhat random. Choose a cost of 20 to indicate that
1179       // vectorizing devision (added function call) is going to be very expensive.
1180       // Double registers types.
1181       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1182       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1183       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1184       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1185       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1186       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1187       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1188       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1189       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1190       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1191       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1192       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1193       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1194       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1195       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1196       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1197       // Quad register types.
1198       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1199       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1200       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1201       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1202       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1203       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1204       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1205       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1206       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1207       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1208       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1209       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1210       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1211       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1212       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1213       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1214       // Multiplication.
1215     };
1216 
1217     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1218       return LT.first * Entry->Cost;
1219 
1220     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1221                                              Op2Info,
1222                                              Opd1PropInfo, Opd2PropInfo);
1223 
1224     // This is somewhat of a hack. The problem that we are facing is that SROA
1225     // creates a sequence of shift, and, or instructions to construct values.
1226     // These sequences are recognized by the ISel and have zero-cost. Not so for
1227     // the vectorized code. Because we have support for v2i64 but not i64 those
1228     // sequences look particularly beneficial to vectorize.
1229     // To work around this we increase the cost of v2i64 operations to make them
1230     // seem less beneficial.
1231     if (LT.second == MVT::v2i64 &&
1232         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1233       Cost += 4;
1234 
1235     return Cost;
1236   }
1237 
1238   // If this operation is a shift on arm/thumb2, it might well be folded into
1239   // the following instruction, hence having a cost of 0.
1240   auto LooksLikeAFreeShift = [&]() {
1241     if (ST->isThumb1Only() || Ty->isVectorTy())
1242       return false;
1243 
1244     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1245       return false;
1246     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1247       return false;
1248 
1249     // Folded into a ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1250     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1251     case Instruction::Add:
1252     case Instruction::Sub:
1253     case Instruction::And:
1254     case Instruction::Xor:
1255     case Instruction::Or:
1256     case Instruction::ICmp:
1257       return true;
1258     default:
1259       return false;
1260     }
1261   };
1262   if (LooksLikeAFreeShift())
1263     return 0;
1264 
1265   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1266   // for "multiple beats" potentially needed by MVE instructions.
1267   int BaseCost = 1;
1268   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1269     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1270 
1271   // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1272   // without treating floats as more expensive that scalars or increasing the
1273   // costs for custom operations. The results is also multiplied by the
1274   // MVEVectorCostFactor where appropriate.
1275   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1276     return LT.first * BaseCost;
1277 
1278   // Else this is expand, assume that we need to scalarize this op.
1279   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1280     unsigned Num = VTy->getNumElements();
1281     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
1282                                            CostKind);
1283     // Return the cost of multiple scalar invocation plus the cost of
1284     // inserting and extracting the values.
1285     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
1286   }
1287 
1288   return BaseCost;
1289 }
1290 
1291 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1292                                 MaybeAlign Alignment, unsigned AddressSpace,
1293                                 TTI::TargetCostKind CostKind,
1294                                 const Instruction *I) {
1295   // TODO: Handle other cost kinds.
1296   if (CostKind != TTI::TCK_RecipThroughput)
1297     return 1;
1298 
1299   // Type legalization can't handle structs
1300   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1301     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1302                                   CostKind);
1303 
1304   if (ST->hasNEON() && Src->isVectorTy() &&
1305       (Alignment && *Alignment != Align(16)) &&
1306       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1307     // Unaligned loads/stores are extremely inefficient.
1308     // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
1309     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1310     return LT.first * 4;
1311   }
1312 
1313   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1314   // Same for stores.
1315   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1316       ((Opcode == Instruction::Load && I->hasOneUse() &&
1317         isa<FPExtInst>(*I->user_begin())) ||
1318        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1319     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1320     Type *DstTy =
1321         Opcode == Instruction::Load
1322             ? (*I->user_begin())->getType()
1323             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1324     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1325         DstTy->getScalarType()->isFloatTy())
1326       return ST->getMVEVectorCostFactor(CostKind);
1327   }
1328 
1329   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1330                      ? ST->getMVEVectorCostFactor(CostKind)
1331                      : 1;
1332   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1333                                            CostKind, I);
1334 }
1335 
1336 unsigned ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1337                                            Align Alignment,
1338                                            unsigned AddressSpace,
1339                                            TTI::TargetCostKind CostKind) {
1340   if (ST->hasMVEIntegerOps()) {
1341     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1342       return ST->getMVEVectorCostFactor(CostKind);
1343     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1344       return ST->getMVEVectorCostFactor(CostKind);
1345   }
1346   if (!isa<FixedVectorType>(Src))
1347     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1348                                         CostKind);
1349   // Scalar cost, which is currently very high due to the efficiency of the
1350   // generated code.
1351   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1352 }
1353 
1354 int ARMTTIImpl::getInterleavedMemoryOpCost(
1355     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1356     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1357     bool UseMaskForCond, bool UseMaskForGaps) {
1358   assert(Factor >= 2 && "Invalid interleave factor");
1359   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1360 
1361   // vldN/vstN doesn't support vector types of i64/f64 element.
1362   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1363 
1364   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1365       !UseMaskForCond && !UseMaskForGaps) {
1366     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1367     auto *SubVecTy =
1368         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1369 
1370     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1371     // Accesses having vector types that are a multiple of 128 bits can be
1372     // matched to more than one vldN/vstN instruction.
1373     int BaseCost =
1374         ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1375     if (NumElts % Factor == 0 &&
1376         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1377       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1378 
1379     // Some smaller than legal interleaved patterns are cheap as we can make
1380     // use of the vmovn or vrev patterns to interleave a standard load. This is
1381     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1382     // promoted differently). The cost of 2 here is then a load and vrev or
1383     // vmovn.
1384     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1385         VecTy->isIntOrIntVectorTy() &&
1386         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1387       return 2 * BaseCost;
1388   }
1389 
1390   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1391                                            Alignment, AddressSpace, CostKind,
1392                                            UseMaskForCond, UseMaskForGaps);
1393 }
1394 
1395 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1396                                             const Value *Ptr, bool VariableMask,
1397                                             Align Alignment,
1398                                             TTI::TargetCostKind CostKind,
1399                                             const Instruction *I) {
1400   using namespace PatternMatch;
1401   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1402     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1403                                          Alignment, CostKind, I);
1404 
1405   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1406   auto *VTy = cast<FixedVectorType>(DataTy);
1407 
1408   // TODO: Splitting, once we do that.
1409 
1410   unsigned NumElems = VTy->getNumElements();
1411   unsigned EltSize = VTy->getScalarSizeInBits();
1412   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1413 
1414   // For now, it is assumed that for the MVE gather instructions the loads are
1415   // all effectively serialised. This means the cost is the scalar cost
1416   // multiplied by the number of elements being loaded. This is possibly very
1417   // conservative, but even so we still end up vectorising loops because the
1418   // cost per iteration for many loops is lower than for scalar loops.
1419   unsigned VectorCost =
1420       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1421   // The scalarization cost should be a lot higher. We use the number of vector
1422   // elements plus the scalarization overhead.
1423   unsigned ScalarCost = NumElems * LT.first +
1424                         BaseT::getScalarizationOverhead(VTy, true, false) +
1425                         BaseT::getScalarizationOverhead(VTy, false, true);
1426 
1427   if (EltSize < 8 || Alignment < EltSize / 8)
1428     return ScalarCost;
1429 
1430   unsigned ExtSize = EltSize;
1431   // Check whether there's a single user that asks for an extended type
1432   if (I != nullptr) {
1433     // Dependent of the caller of this function, a gather instruction will
1434     // either have opcode Instruction::Load or be a call to the masked_gather
1435     // intrinsic
1436     if ((I->getOpcode() == Instruction::Load ||
1437          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1438         I->hasOneUse()) {
1439       const User *Us = *I->users().begin();
1440       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1441         // only allow valid type combinations
1442         unsigned TypeSize =
1443             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1444         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1445              (TypeSize == 16 && EltSize == 8)) &&
1446             TypeSize * NumElems == 128) {
1447           ExtSize = TypeSize;
1448         }
1449       }
1450     }
1451     // Check whether the input data needs to be truncated
1452     TruncInst *T;
1453     if ((I->getOpcode() == Instruction::Store ||
1454          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1455         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1456       // Only allow valid type combinations
1457       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1458       if (((EltSize == 16 && TypeSize == 32) ||
1459            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1460           TypeSize * NumElems == 128)
1461         ExtSize = TypeSize;
1462     }
1463   }
1464 
1465   if (ExtSize * NumElems != 128 || NumElems < 4)
1466     return ScalarCost;
1467 
1468   // Any (aligned) i32 gather will not need to be scalarised.
1469   if (ExtSize == 32)
1470     return VectorCost;
1471   // For smaller types, we need to ensure that the gep's inputs are correctly
1472   // extended from a small enough value. Other sizes (including i64) are
1473   // scalarized for now.
1474   if (ExtSize != 8 && ExtSize != 16)
1475     return ScalarCost;
1476 
1477   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1478     Ptr = BC->getOperand(0);
1479   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1480     if (GEP->getNumOperands() != 2)
1481       return ScalarCost;
1482     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1483     // Scale needs to be correct (which is only relevant for i16s).
1484     if (Scale != 1 && Scale * 8 != ExtSize)
1485       return ScalarCost;
1486     // And we need to zext (not sext) the indexes from a small enough type.
1487     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1488       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1489         return VectorCost;
1490     }
1491     return ScalarCost;
1492   }
1493   return ScalarCost;
1494 }
1495 
1496 int ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1497                                            bool IsPairwiseForm,
1498                                            TTI::TargetCostKind CostKind) {
1499   EVT ValVT = TLI->getValueType(DL, ValTy);
1500   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1501   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1502     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1503                                              CostKind);
1504 
1505   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1506 
1507   static const CostTblEntry CostTblAdd[]{
1508       {ISD::ADD, MVT::v16i8, 1},
1509       {ISD::ADD, MVT::v8i16, 1},
1510       {ISD::ADD, MVT::v4i32, 1},
1511   };
1512   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1513     return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1514 
1515   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1516                                            CostKind);
1517 }
1518 
1519 InstructionCost
1520 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
1521                                         Type *ResTy, VectorType *ValTy,
1522                                         TTI::TargetCostKind CostKind) {
1523   EVT ValVT = TLI->getValueType(DL, ValTy);
1524   EVT ResVT = TLI->getValueType(DL, ResTy);
1525   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1526     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1527     if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
1528         (LT.second == MVT::v8i16 &&
1529          ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
1530         (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
1531       return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1532   }
1533 
1534   return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
1535                                             CostKind);
1536 }
1537 
1538 int ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1539                                       TTI::TargetCostKind CostKind) {
1540   switch (ICA.getID()) {
1541   case Intrinsic::get_active_lane_mask:
1542     // Currently we make a somewhat optimistic assumption that
1543     // active_lane_mask's are always free. In reality it may be freely folded
1544     // into a tail predicated loop, expanded into a VCPT or expanded into a lot
1545     // of add/icmp code. We may need to improve this in the future, but being
1546     // able to detect if it is free or not involves looking at a lot of other
1547     // code. We currently assume that the vectorizer inserted these, and knew
1548     // what it was doing in adding one.
1549     if (ST->hasMVEIntegerOps())
1550       return 0;
1551     break;
1552   case Intrinsic::sadd_sat:
1553   case Intrinsic::ssub_sat:
1554   case Intrinsic::uadd_sat:
1555   case Intrinsic::usub_sat: {
1556     if (!ST->hasMVEIntegerOps())
1557       break;
1558     // Get the Return type, either directly of from ICA.ReturnType and ICA.VF.
1559     Type *VT = ICA.getReturnType();
1560     if (!VT->isVectorTy() && !ICA.getVectorFactor().isScalar())
1561       VT = VectorType::get(VT, ICA.getVectorFactor());
1562 
1563     std::pair<int, MVT> LT =
1564         TLI->getTypeLegalizationCost(DL, VT);
1565     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1566         LT.second == MVT::v16i8) {
1567       // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
1568       // need to extend the type, as it uses shr(qadd(shl, shl)).
1569       unsigned Instrs = LT.second.getScalarSizeInBits() ==
1570                                 ICA.getReturnType()->getScalarSizeInBits()
1571                             ? 1
1572                             : 4;
1573       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1574     }
1575     break;
1576   }
1577   }
1578 
1579   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1580 }
1581 
1582 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1583   if (!F->isIntrinsic())
1584     BaseT::isLoweredToCall(F);
1585 
1586   // Assume all Arm-specific intrinsics map to an instruction.
1587   if (F->getName().startswith("llvm.arm"))
1588     return false;
1589 
1590   switch (F->getIntrinsicID()) {
1591   default: break;
1592   case Intrinsic::powi:
1593   case Intrinsic::sin:
1594   case Intrinsic::cos:
1595   case Intrinsic::pow:
1596   case Intrinsic::log:
1597   case Intrinsic::log10:
1598   case Intrinsic::log2:
1599   case Intrinsic::exp:
1600   case Intrinsic::exp2:
1601     return true;
1602   case Intrinsic::sqrt:
1603   case Intrinsic::fabs:
1604   case Intrinsic::copysign:
1605   case Intrinsic::floor:
1606   case Intrinsic::ceil:
1607   case Intrinsic::trunc:
1608   case Intrinsic::rint:
1609   case Intrinsic::nearbyint:
1610   case Intrinsic::round:
1611   case Intrinsic::canonicalize:
1612   case Intrinsic::lround:
1613   case Intrinsic::llround:
1614   case Intrinsic::lrint:
1615   case Intrinsic::llrint:
1616     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1617       return true;
1618     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1619       return true;
1620     // Some operations can be handled by vector instructions and assume
1621     // unsupported vectors will be expanded into supported scalar ones.
1622     // TODO Handle scalar operations properly.
1623     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1624   case Intrinsic::masked_store:
1625   case Intrinsic::masked_load:
1626   case Intrinsic::masked_gather:
1627   case Intrinsic::masked_scatter:
1628     return !ST->hasMVEIntegerOps();
1629   case Intrinsic::sadd_with_overflow:
1630   case Intrinsic::uadd_with_overflow:
1631   case Intrinsic::ssub_with_overflow:
1632   case Intrinsic::usub_with_overflow:
1633   case Intrinsic::sadd_sat:
1634   case Intrinsic::uadd_sat:
1635   case Intrinsic::ssub_sat:
1636   case Intrinsic::usub_sat:
1637     return false;
1638   }
1639 
1640   return BaseT::isLoweredToCall(F);
1641 }
1642 
1643 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1644   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1645   EVT VT = TLI->getValueType(DL, I.getType(), true);
1646   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1647     return true;
1648 
1649   // Check if an intrinsic will be lowered to a call and assume that any
1650   // other CallInst will generate a bl.
1651   if (auto *Call = dyn_cast<CallInst>(&I)) {
1652     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
1653       switch(II->getIntrinsicID()) {
1654         case Intrinsic::memcpy:
1655         case Intrinsic::memset:
1656         case Intrinsic::memmove:
1657           return getNumMemOps(II) == -1;
1658         default:
1659           if (const Function *F = Call->getCalledFunction())
1660             return isLoweredToCall(F);
1661       }
1662     }
1663     return true;
1664   }
1665 
1666   // FPv5 provides conversions between integer, double-precision,
1667   // single-precision, and half-precision formats.
1668   switch (I.getOpcode()) {
1669   default:
1670     break;
1671   case Instruction::FPToSI:
1672   case Instruction::FPToUI:
1673   case Instruction::SIToFP:
1674   case Instruction::UIToFP:
1675   case Instruction::FPTrunc:
1676   case Instruction::FPExt:
1677     return !ST->hasFPARMv8Base();
1678   }
1679 
1680   // FIXME: Unfortunately the approach of checking the Operation Action does
1681   // not catch all cases of Legalization that use library calls. Our
1682   // Legalization step categorizes some transformations into library calls as
1683   // Custom, Expand or even Legal when doing type legalization. So for now
1684   // we have to special case for instance the SDIV of 64bit integers and the
1685   // use of floating point emulation.
1686   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1687     switch (ISD) {
1688     default:
1689       break;
1690     case ISD::SDIV:
1691     case ISD::UDIV:
1692     case ISD::SREM:
1693     case ISD::UREM:
1694     case ISD::SDIVREM:
1695     case ISD::UDIVREM:
1696       return true;
1697     }
1698   }
1699 
1700   // Assume all other non-float operations are supported.
1701   if (!VT.isFloatingPoint())
1702     return false;
1703 
1704   // We'll need a library call to handle most floats when using soft.
1705   if (TLI->useSoftFloat()) {
1706     switch (I.getOpcode()) {
1707     default:
1708       return true;
1709     case Instruction::Alloca:
1710     case Instruction::Load:
1711     case Instruction::Store:
1712     case Instruction::Select:
1713     case Instruction::PHI:
1714       return false;
1715     }
1716   }
1717 
1718   // We'll need a libcall to perform double precision operations on a single
1719   // precision only FPU.
1720   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1721     return true;
1722 
1723   // Likewise for half precision arithmetic.
1724   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1725     return true;
1726 
1727   return false;
1728 }
1729 
1730 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1731                                           AssumptionCache &AC,
1732                                           TargetLibraryInfo *LibInfo,
1733                                           HardwareLoopInfo &HWLoopInfo) {
1734   // Low-overhead branches are only supported in the 'low-overhead branch'
1735   // extension of v8.1-m.
1736   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1737     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1738     return false;
1739   }
1740 
1741   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1742     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1743     return false;
1744   }
1745 
1746   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1747   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1748     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1749     return false;
1750   }
1751 
1752   const SCEV *TripCountSCEV =
1753     SE.getAddExpr(BackedgeTakenCount,
1754                   SE.getOne(BackedgeTakenCount->getType()));
1755 
1756   // We need to store the trip count in LR, a 32-bit register.
1757   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1758     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1759     return false;
1760   }
1761 
1762   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1763   // point in generating a hardware loop if that's going to happen.
1764 
1765   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1766     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1767       switch (Call->getIntrinsicID()) {
1768       default:
1769         break;
1770       case Intrinsic::start_loop_iterations:
1771       case Intrinsic::test_set_loop_iterations:
1772       case Intrinsic::loop_decrement:
1773       case Intrinsic::loop_decrement_reg:
1774         return true;
1775       }
1776     }
1777     return false;
1778   };
1779 
1780   // Scan the instructions to see if there's any that we know will turn into a
1781   // call or if this loop is already a low-overhead loop or will become a tail
1782   // predicated loop.
1783   bool IsTailPredLoop = false;
1784   auto ScanLoop = [&](Loop *L) {
1785     for (auto *BB : L->getBlocks()) {
1786       for (auto &I : *BB) {
1787         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
1788             isa<InlineAsm>(I)) {
1789           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1790           return false;
1791         }
1792         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1793           IsTailPredLoop |=
1794               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
1795               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
1796               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
1797               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
1798               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
1799       }
1800     }
1801     return true;
1802   };
1803 
1804   // Visit inner loops.
1805   for (auto Inner : *L)
1806     if (!ScanLoop(Inner))
1807       return false;
1808 
1809   if (!ScanLoop(L))
1810     return false;
1811 
1812   // TODO: Check whether the trip count calculation is expensive. If L is the
1813   // inner loop but we know it has a low trip count, calculating that trip
1814   // count (in the parent loop) may be detrimental.
1815 
1816   LLVMContext &C = L->getHeader()->getContext();
1817   HWLoopInfo.CounterInReg = true;
1818   HWLoopInfo.IsNestingLegal = false;
1819   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
1820   HWLoopInfo.CountType = Type::getInt32Ty(C);
1821   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1822   return true;
1823 }
1824 
1825 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1826   // We don't allow icmp's, and because we only look at single block loops,
1827   // we simply count the icmps, i.e. there should only be 1 for the backedge.
1828   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1829     return false;
1830 
1831   if (isa<FCmpInst>(&I))
1832     return false;
1833 
1834   // We could allow extending/narrowing FP loads/stores, but codegen is
1835   // too inefficient so reject this for now.
1836   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1837     return false;
1838 
1839   // Extends have to be extending-loads
1840   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
1841     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1842       return false;
1843 
1844   // Truncs have to be narrowing-stores
1845   if (isa<TruncInst>(&I) )
1846     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1847       return false;
1848 
1849   return true;
1850 }
1851 
1852 // To set up a tail-predicated loop, we need to know the total number of
1853 // elements processed by that loop. Thus, we need to determine the element
1854 // size and:
1855 // 1) it should be uniform for all operations in the vector loop, so we
1856 //    e.g. don't want any widening/narrowing operations.
1857 // 2) it should be smaller than i64s because we don't have vector operations
1858 //    that work on i64s.
1859 // 3) we don't want elements to be reversed or shuffled, to make sure the
1860 //    tail-predication masks/predicates the right lanes.
1861 //
1862 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1863                                  const DataLayout &DL,
1864                                  const LoopAccessInfo *LAI) {
1865   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1866 
1867   // If there are live-out values, it is probably a reduction. We can predicate
1868   // most reduction operations freely under MVE using a combination of
1869   // prefer-predicated-reduction-select and inloop reductions. We limit this to
1870   // floating point and integer reductions, but don't check for operators
1871   // specifically here. If the value ends up not being a reduction (and so the
1872   // vectorizer cannot tailfold the loop), we should fall back to standard
1873   // vectorization automatically.
1874   SmallVector< Instruction *, 8 > LiveOuts;
1875   LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1876   bool ReductionsDisabled =
1877       EnableTailPredication == TailPredication::EnabledNoReductions ||
1878       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1879 
1880   for (auto *I : LiveOuts) {
1881     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
1882         !I->getType()->isHalfTy()) {
1883       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
1884                            "live-out value\n");
1885       return false;
1886     }
1887     if (ReductionsDisabled) {
1888       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
1889       return false;
1890     }
1891   }
1892 
1893   // Next, check that all instructions can be tail-predicated.
1894   PredicatedScalarEvolution PSE = LAI->getPSE();
1895   SmallVector<Instruction *, 16> LoadStores;
1896   int ICmpCount = 0;
1897 
1898   for (BasicBlock *BB : L->blocks()) {
1899     for (Instruction &I : BB->instructionsWithoutDebug()) {
1900       if (isa<PHINode>(&I))
1901         continue;
1902       if (!canTailPredicateInstruction(I, ICmpCount)) {
1903         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1904         return false;
1905       }
1906 
1907       Type *T  = I.getType();
1908       if (T->isPointerTy())
1909         T = T->getPointerElementType();
1910 
1911       if (T->getScalarSizeInBits() > 32) {
1912         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1913         return false;
1914       }
1915       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1916         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1917         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1918         if (NextStride == 1) {
1919           // TODO: for now only allow consecutive strides of 1. We could support
1920           // other strides as long as it is uniform, but let's keep it simple
1921           // for now.
1922           continue;
1923         } else if (NextStride == -1 ||
1924                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
1925                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
1926           LLVM_DEBUG(dbgs()
1927                      << "Consecutive strides of 2 found, vld2/vstr2 can't "
1928                         "be tail-predicated\n.");
1929           return false;
1930           // TODO: don't tail predicate if there is a reversed load?
1931         } else if (EnableMaskedGatherScatters) {
1932           // Gather/scatters do allow loading from arbitrary strides, at
1933           // least if they are loop invariant.
1934           // TODO: Loop variant strides should in theory work, too, but
1935           // this requires further testing.
1936           const SCEV *PtrScev =
1937               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
1938           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
1939             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1940             if (PSE.getSE()->isLoopInvariant(Step, L))
1941               continue;
1942           }
1943         }
1944         LLVM_DEBUG(dbgs() << "Bad stride found, can't "
1945                              "tail-predicate\n.");
1946         return false;
1947       }
1948     }
1949   }
1950 
1951   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1952   return true;
1953 }
1954 
1955 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1956                                              ScalarEvolution &SE,
1957                                              AssumptionCache &AC,
1958                                              TargetLibraryInfo *TLI,
1959                                              DominatorTree *DT,
1960                                              const LoopAccessInfo *LAI) {
1961   if (!EnableTailPredication) {
1962     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1963     return false;
1964   }
1965 
1966   // Creating a predicated vector loop is the first step for generating a
1967   // tail-predicated hardware loop, for which we need the MVE masked
1968   // load/stores instructions:
1969   if (!ST->hasMVEIntegerOps())
1970     return false;
1971 
1972   // For now, restrict this to single block loops.
1973   if (L->getNumBlocks() > 1) {
1974     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1975                          "loop.\n");
1976     return false;
1977   }
1978 
1979   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
1980 
1981   HardwareLoopInfo HWLoopInfo(L);
1982   if (!HWLoopInfo.canAnalyze(*LI)) {
1983     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1984                          "analyzable.\n");
1985     return false;
1986   }
1987 
1988   // This checks if we have the low-overhead branch architecture
1989   // extension, and if we will create a hardware-loop:
1990   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1991     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1992                          "profitable.\n");
1993     return false;
1994   }
1995 
1996   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1997     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1998                          "a candidate.\n");
1999     return false;
2000   }
2001 
2002   return canTailPredicateLoop(L, LI, SE, DL, LAI);
2003 }
2004 
2005 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2006   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2007     return false;
2008 
2009   // Intrinsic @llvm.get.active.lane.mask is supported.
2010   // It is used in the MVETailPredication pass, which requires the number of
2011   // elements processed by this vector loop to setup the tail-predicated
2012   // loop.
2013   return true;
2014 }
2015 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2016                                          TTI::UnrollingPreferences &UP) {
2017   // Only currently enable these preferences for M-Class cores.
2018   if (!ST->isMClass())
2019     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
2020 
2021   // Disable loop unrolling for Oz and Os.
2022   UP.OptSizeThreshold = 0;
2023   UP.PartialOptSizeThreshold = 0;
2024   if (L->getHeader()->getParent()->hasOptSize())
2025     return;
2026 
2027   // Only enable on Thumb-2 targets.
2028   if (!ST->isThumb2())
2029     return;
2030 
2031   SmallVector<BasicBlock*, 4> ExitingBlocks;
2032   L->getExitingBlocks(ExitingBlocks);
2033   LLVM_DEBUG(dbgs() << "Loop has:\n"
2034                     << "Blocks: " << L->getNumBlocks() << "\n"
2035                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2036 
2037   // Only allow another exit other than the latch. This acts as an early exit
2038   // as it mirrors the profitability calculation of the runtime unroller.
2039   if (ExitingBlocks.size() > 2)
2040     return;
2041 
2042   // Limit the CFG of the loop body for targets with a branch predictor.
2043   // Allowing 4 blocks permits if-then-else diamonds in the body.
2044   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2045     return;
2046 
2047   // Don't unroll vectorized loops, including the remainder loop
2048   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2049     return;
2050 
2051   // Scan the loop: don't unroll loops with calls as this could prevent
2052   // inlining.
2053   unsigned Cost = 0;
2054   for (auto *BB : L->getBlocks()) {
2055     for (auto &I : *BB) {
2056       // Don't unroll vectorised loop. MVE does not benefit from it as much as
2057       // scalar code.
2058       if (I.getType()->isVectorTy())
2059         return;
2060 
2061       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2062         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2063           if (!isLoweredToCall(F))
2064             continue;
2065         }
2066         return;
2067       }
2068 
2069       SmallVector<const Value*, 4> Operands(I.operand_values());
2070       Cost +=
2071         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
2072     }
2073   }
2074 
2075   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2076 
2077   UP.Partial = true;
2078   UP.Runtime = true;
2079   UP.UpperBound = true;
2080   UP.UnrollRemainder = true;
2081   UP.DefaultUnrollRuntimeCount = 4;
2082   UP.UnrollAndJam = true;
2083   UP.UnrollAndJamInnerLoopThreshold = 60;
2084 
2085   // Force unrolling small loops can be very useful because of the branch
2086   // taken cost of the backedge.
2087   if (Cost < 12)
2088     UP.Force = true;
2089 }
2090 
2091 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2092                                        TTI::PeelingPreferences &PP) {
2093   BaseT::getPeelingPreferences(L, SE, PP);
2094 }
2095 
2096 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2097                                        TTI::ReductionFlags Flags) const {
2098   if (!ST->hasMVEIntegerOps())
2099     return false;
2100 
2101   unsigned ScalarBits = Ty->getScalarSizeInBits();
2102   switch (Opcode) {
2103   case Instruction::Add:
2104     return ScalarBits <= 64;
2105   default:
2106     return false;
2107   }
2108 }
2109 
2110 bool ARMTTIImpl::preferPredicatedReductionSelect(
2111     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2112   if (!ST->hasMVEIntegerOps())
2113     return false;
2114   return true;
2115 }
2116