xref: /llvm-project/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp (revision ab90d2793cf56758a91f7a7ae027850af2455d3e)
1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/CodeGenTypes/MachineValueType.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/DerivedTypes.h"
22 #include "llvm/IR/Instruction.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/IntrinsicsARM.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Target/TargetMachine.h"
32 #include "llvm/TargetParser/SubtargetFeature.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
37 #include <algorithm>
38 #include <cassert>
39 #include <cstdint>
40 #include <optional>
41 #include <utility>
42 
43 using namespace llvm;
44 
45 #define DEBUG_TYPE "armtti"
46 
47 static cl::opt<bool> EnableMaskedLoadStores(
48   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
49   cl::desc("Enable the generation of masked loads and stores"));
50 
51 static cl::opt<bool> DisableLowOverheadLoops(
52   "disable-arm-loloops", cl::Hidden, cl::init(false),
53   cl::desc("Disable the generation of low-overhead loops"));
54 
55 static cl::opt<bool>
56     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
57                   cl::desc("Enable the generation of WLS loops"));
58 
59 static cl::opt<bool> UseWidenGlobalArrays(
60     "widen-global-strings", cl::Hidden, cl::init(true),
61     cl::desc("Enable the widening of global strings to alignment boundaries"));
62 
63 extern cl::opt<TailPredication::Mode> EnableTailPredication;
64 
65 extern cl::opt<bool> EnableMaskedGatherScatters;
66 
67 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
68 
69 /// Convert a vector load intrinsic into a simple llvm load instruction.
70 /// This is beneficial when the underlying object being addressed comes
71 /// from a constant, since we get constant-folding for free.
72 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
73                                InstCombiner::BuilderTy &Builder) {
74   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
75 
76   if (!IntrAlign)
77     return nullptr;
78 
79   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
80                            ? MemAlign
81                            : IntrAlign->getLimitedValue();
82 
83   if (!isPowerOf2_32(Alignment))
84     return nullptr;
85 
86   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
87                                           PointerType::get(II.getType(), 0));
88   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
89 }
90 
91 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
92                                      const Function *Callee) const {
93   const TargetMachine &TM = getTLI()->getTargetMachine();
94   const FeatureBitset &CallerBits =
95       TM.getSubtargetImpl(*Caller)->getFeatureBits();
96   const FeatureBitset &CalleeBits =
97       TM.getSubtargetImpl(*Callee)->getFeatureBits();
98 
99   // To inline a callee, all features not in the allowed list must match exactly.
100   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
101                     (CalleeBits & ~InlineFeaturesAllowed);
102   // For features in the allowed list, the callee's features must be a subset of
103   // the caller's.
104   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
105                      (CalleeBits & InlineFeaturesAllowed);
106   return MatchExact && MatchSubset;
107 }
108 
109 TTI::AddressingModeKind
110 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
111                                        ScalarEvolution *SE) const {
112   if (ST->hasMVEIntegerOps())
113     return TTI::AMK_PostIndexed;
114 
115   if (L->getHeader()->getParent()->hasOptSize())
116     return TTI::AMK_None;
117 
118   if (ST->isMClass() && ST->isThumb2() &&
119       L->getNumBlocks() == 1)
120     return TTI::AMK_PreIndexed;
121 
122   return TTI::AMK_None;
123 }
124 
125 std::optional<Instruction *>
126 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
127   using namespace PatternMatch;
128   Intrinsic::ID IID = II.getIntrinsicID();
129   switch (IID) {
130   default:
131     break;
132   case Intrinsic::arm_neon_vld1: {
133     Align MemAlign =
134         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
135                           &IC.getAssumptionCache(), &IC.getDominatorTree());
136     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
137       return IC.replaceInstUsesWith(II, V);
138     }
139     break;
140   }
141 
142   case Intrinsic::arm_neon_vld2:
143   case Intrinsic::arm_neon_vld3:
144   case Intrinsic::arm_neon_vld4:
145   case Intrinsic::arm_neon_vld2lane:
146   case Intrinsic::arm_neon_vld3lane:
147   case Intrinsic::arm_neon_vld4lane:
148   case Intrinsic::arm_neon_vst1:
149   case Intrinsic::arm_neon_vst2:
150   case Intrinsic::arm_neon_vst3:
151   case Intrinsic::arm_neon_vst4:
152   case Intrinsic::arm_neon_vst2lane:
153   case Intrinsic::arm_neon_vst3lane:
154   case Intrinsic::arm_neon_vst4lane: {
155     Align MemAlign =
156         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
157                           &IC.getAssumptionCache(), &IC.getDominatorTree());
158     unsigned AlignArg = II.arg_size() - 1;
159     Value *AlignArgOp = II.getArgOperand(AlignArg);
160     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
161     if (Align && *Align < MemAlign) {
162       return IC.replaceOperand(
163           II, AlignArg,
164           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
165                            false));
166     }
167     break;
168   }
169 
170   case Intrinsic::arm_neon_vld1x2:
171   case Intrinsic::arm_neon_vld1x3:
172   case Intrinsic::arm_neon_vld1x4:
173   case Intrinsic::arm_neon_vst1x2:
174   case Intrinsic::arm_neon_vst1x3:
175   case Intrinsic::arm_neon_vst1x4: {
176     Align NewAlign =
177         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
178                           &IC.getAssumptionCache(), &IC.getDominatorTree());
179     Align OldAlign = II.getParamAlign(0).valueOrOne();
180     if (NewAlign > OldAlign)
181       II.addParamAttr(0,
182                       Attribute::getWithAlignment(II.getContext(), NewAlign));
183     break;
184   }
185 
186   case Intrinsic::arm_mve_pred_i2v: {
187     Value *Arg = II.getArgOperand(0);
188     Value *ArgArg;
189     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
190                        PatternMatch::m_Value(ArgArg))) &&
191         II.getType() == ArgArg->getType()) {
192       return IC.replaceInstUsesWith(II, ArgArg);
193     }
194     Constant *XorMask;
195     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
196                              PatternMatch::m_Value(ArgArg)),
197                          PatternMatch::m_Constant(XorMask))) &&
198         II.getType() == ArgArg->getType()) {
199       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
200         if (CI->getValue().trunc(16).isAllOnes()) {
201           auto TrueVector = IC.Builder.CreateVectorSplat(
202               cast<FixedVectorType>(II.getType())->getNumElements(),
203               IC.Builder.getTrue());
204           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
205         }
206       }
207     }
208     KnownBits ScalarKnown(32);
209     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
210                                 ScalarKnown)) {
211       return &II;
212     }
213     break;
214   }
215   case Intrinsic::arm_mve_pred_v2i: {
216     Value *Arg = II.getArgOperand(0);
217     Value *ArgArg;
218     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
219                        PatternMatch::m_Value(ArgArg)))) {
220       return IC.replaceInstUsesWith(II, ArgArg);
221     }
222 
223     if (II.getMetadata(LLVMContext::MD_range))
224       break;
225 
226     ConstantRange Range(APInt(32, 0), APInt(32, 0x10000));
227 
228     if (auto CurrentRange = II.getRange()) {
229       Range = Range.intersectWith(*CurrentRange);
230       if (Range == CurrentRange)
231         break;
232     }
233 
234     II.addRangeRetAttr(Range);
235     II.addRetAttr(Attribute::NoUndef);
236     return &II;
237   }
238   case Intrinsic::arm_mve_vadc:
239   case Intrinsic::arm_mve_vadc_predicated: {
240     unsigned CarryOp =
241         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
242     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
243            "Bad type for intrinsic!");
244 
245     KnownBits CarryKnown(32);
246     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
247                                 CarryKnown)) {
248       return &II;
249     }
250     break;
251   }
252   case Intrinsic::arm_mve_vmldava: {
253     Instruction *I = cast<Instruction>(&II);
254     if (I->hasOneUse()) {
255       auto *User = cast<Instruction>(*I->user_begin());
256       Value *OpZ;
257       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
258           match(I->getOperand(3), m_Zero())) {
259         Value *OpX = I->getOperand(4);
260         Value *OpY = I->getOperand(5);
261         Type *OpTy = OpX->getType();
262 
263         IC.Builder.SetInsertPoint(User);
264         Value *V =
265             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
266                                        {I->getOperand(0), I->getOperand(1),
267                                         I->getOperand(2), OpZ, OpX, OpY});
268 
269         IC.replaceInstUsesWith(*User, V);
270         return IC.eraseInstFromFunction(*User);
271       }
272     }
273     return std::nullopt;
274   }
275   }
276   return std::nullopt;
277 }
278 
279 std::optional<Value *> ARMTTIImpl::simplifyDemandedVectorEltsIntrinsic(
280     InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
281     APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
282     std::function<void(Instruction *, unsigned, APInt, APInt &)>
283         SimplifyAndSetOp) const {
284 
285   // Compute the demanded bits for a narrowing MVE intrinsic. The TopOpc is the
286   // opcode specifying a Top/Bottom instruction, which can change between
287   // instructions.
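  // Illustrative example (derived from the logic below): for an 8-lane
  // narrowing intrinsic with IsTop == 1, the instruction writes the odd lanes
  // of the destination, so only the even lanes of operand 0 are demanded
  // (DemandedElts == 0b01010101).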
288   auto SimplifyNarrowInstrTopBottom = [&](unsigned TopOpc) {
289     unsigned NumElts = cast<FixedVectorType>(II.getType())->getNumElements();
290     unsigned IsTop = cast<ConstantInt>(II.getOperand(TopOpc))->getZExtValue();
291 
292     // Only the odd/even lanes of operand 0 will be demanded, depending
293     // on whether this is a top/bottom instruction.
294     APInt DemandedElts =
295         APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
296                                        : APInt::getHighBitsSet(2, 1));
297     SimplifyAndSetOp(&II, 0, OrigDemandedElts & DemandedElts, UndefElts);
298     // The other lanes will be defined from the inserted elements.
299     UndefElts &= APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
300                                                 : APInt::getHighBitsSet(2, 1));
301     return std::nullopt;
302   };
303 
304   switch (II.getIntrinsicID()) {
305   default:
306     break;
307   case Intrinsic::arm_mve_vcvt_narrow:
308     SimplifyNarrowInstrTopBottom(2);
309     break;
310   case Intrinsic::arm_mve_vqmovn:
311     SimplifyNarrowInstrTopBottom(4);
312     break;
313   case Intrinsic::arm_mve_vshrn:
314     SimplifyNarrowInstrTopBottom(7);
315     break;
316   }
317 
318   return std::nullopt;
319 }
320 
321 InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
322                                           TTI::TargetCostKind CostKind) {
323   assert(Ty->isIntegerTy());
324 
325   unsigned Bits = Ty->getPrimitiveSizeInBits();
326   if (Bits == 0 || Imm.getActiveBits() >= 64)
327     return 4;
328 
329   int64_t SImmVal = Imm.getSExtValue();
330   uint64_t ZImmVal = Imm.getZExtValue();
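  // Descriptive note on the checks below: for ARM and Thumb2, immediates that
  // fit a single mov/movw/mvn (16-bit values or modified immediates) cost 1;
  // anything else is assumed to need a movw/movt pair (cost 2) or a
  // constant-pool load (cost 3). Thumb1 is handled separately at the end.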
331   if (!ST->isThumb()) {
332     if ((SImmVal >= 0 && SImmVal < 65536) ||
333         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
334         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
335       return 1;
336     return ST->hasV6T2Ops() ? 2 : 3;
337   }
338   if (ST->isThumb2()) {
339     if ((SImmVal >= 0 && SImmVal < 65536) ||
340         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
341         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
342       return 1;
343     return ST->hasV6T2Ops() ? 2 : 3;
344   }
345   // Thumb1: any i8 immediate costs 1.
346   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
347     return 1;
348   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
349     return 2;
350   // Load from constantpool.
351   return 3;
352 }
353 
354 // Constants smaller than 256 fit in the immediate field of
355 // Thumb1 instructions, so we return a cost of zero; otherwise the cost is 1.
356 InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
357                                                   const APInt &Imm, Type *Ty) {
358   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
359     return 0;
360 
361   return 1;
362 }
363 
364 // Checks whether Inst is part of a min(max()) or max(min()) pattern
365 // that will match to an SSAT instruction. Returns the value being
366 // saturated, or null if no saturation pattern was found.
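// For example (illustrative), with Imm == -128 the IR pattern
//   smax(smin(x, 127), -128)
// saturates x to 8 bits and can be selected as "ssat Rd, #8, Rn".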
367 static Value *isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
368   Value *LHS, *RHS;
369   ConstantInt *C;
370   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
371 
372   if (InstSPF == SPF_SMAX &&
373       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
374       C->getValue() == Imm && Imm.isNegative() && Imm.isNegatedPowerOf2()) {
375 
376     auto isSSatMin = [&](Value *MinInst) {
377       if (isa<SelectInst>(MinInst)) {
378         Value *MinLHS, *MinRHS;
379         ConstantInt *MinC;
380         SelectPatternFlavor MinSPF =
381             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
382         if (MinSPF == SPF_SMIN &&
383             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
384             MinC->getValue() == ((-Imm) - 1))
385           return true;
386       }
387       return false;
388     };
389 
390     if (isSSatMin(Inst->getOperand(1)))
391       return cast<Instruction>(Inst->getOperand(1))->getOperand(1);
392     if (Inst->hasNUses(2) &&
393         (isSSatMin(*Inst->user_begin()) || isSSatMin(*(++Inst->user_begin()))))
394       return Inst->getOperand(1);
395   }
396   return nullptr;
397 }
398 
399 // Look for an FP saturation pattern, where the instruction can be simplified
400 // to a fptosi.sat: max(min(fptosi)). The constant in this case is always free.
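// For example (illustrative): a fptosi whose result is clamped by a
// max(min(...)) pair to [-2147483648, 2147483647] can be lowered as a
// saturating conversion (llvm.fptosi.sat), so the INT32_MIN constant should
// not be treated as expensive or hoisted.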
401 static bool isFPSatMinMaxPattern(Instruction *Inst, const APInt &Imm) {
402   if (Imm.getBitWidth() != 64 ||
403       Imm != APInt::getHighBitsSet(64, 33)) // -2147483648
404     return false;
405   Value *FP = isSSATMinMaxPattern(Inst, Imm);
406   if (!FP && isa<ICmpInst>(Inst) && Inst->hasOneUse())
407     FP = isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm);
408   if (!FP)
409     return false;
410   return isa<FPToSIInst>(FP);
411 }
412 
413 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
414                                               const APInt &Imm, Type *Ty,
415                                               TTI::TargetCostKind CostKind,
416                                               Instruction *Inst) {
417   // Division by a constant can be turned into multiplication, but only if we
418   // know it's constant. So it's not so much that the immediate is cheap (it's
419   // not), but that the alternative is worse.
420   // FIXME: this is probably unneeded with GlobalISel.
421   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
422        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
423       Idx == 1)
424     return 0;
425 
426   // Leave any gep offsets for CodeGenPrepare, which will do a better job at
427   // splitting any large offsets.
428   if (Opcode == Instruction::GetElementPtr && Idx != 0)
429     return 0;
430 
431   if (Opcode == Instruction::And) {
432     // UXTB/UXTH
433     if (Imm == 255 || Imm == 65535)
434       return 0;
435     // Conversion to BIC is free, and means we can use ~Imm instead.
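    // e.g. (illustrative) "and r0, r0, #0xffffff00" can instead be encoded as
    // "bic r0, r0, #0xff".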
436     return std::min(getIntImmCost(Imm, Ty, CostKind),
437                     getIntImmCost(~Imm, Ty, CostKind));
438   }
439 
440   if (Opcode == Instruction::Add)
441     // Conversion to SUB is free, and means we can use -Imm instead.
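    // e.g. (illustrative) an add of -10 can be selected as "sub r0, r0, #10".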
442     return std::min(getIntImmCost(Imm, Ty, CostKind),
443                     getIntImmCost(-Imm, Ty, CostKind));
444 
445   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
446       Ty->getIntegerBitWidth() == 32) {
447     int64_t NegImm = -Imm.getSExtValue();
448     if (ST->isThumb2() && NegImm < 1<<12)
449       // icmp X, #-C -> cmn X, #C
450       return 0;
451     if (ST->isThumb() && NegImm < 1<<8)
452       // icmp X, #-C -> adds X, #C
453       return 0;
454   }
455 
456   // xor a, -1 can always be folded to MVN
457   if (Opcode == Instruction::Xor && Imm.isAllOnes())
458     return 0;
459 
460   // Ensure that negative constants of min(max()) or max(min()) patterns that
461   // match SSAT instructions don't get hoisted.
462   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
463       Ty->getIntegerBitWidth() <= 32) {
464     if (isSSATMinMaxPattern(Inst, Imm) ||
465         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
466          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
467       return 0;
468   }
469 
470   if (Inst && ST->hasVFP2Base() && isFPSatMinMaxPattern(Inst, Imm))
471     return 0;
472 
473   // We can convert <= -1 to < 0, which is generally quite cheap.
474   if (Inst && Opcode == Instruction::ICmp && Idx == 1 && Imm.isAllOnes()) {
475     ICmpInst::Predicate Pred = cast<ICmpInst>(Inst)->getPredicate();
476     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE)
477       return std::min(getIntImmCost(Imm, Ty, CostKind),
478                       getIntImmCost(Imm + 1, Ty, CostKind));
479   }
480 
481   return getIntImmCost(Imm, Ty, CostKind);
482 }
483 
484 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
485                                            TTI::TargetCostKind CostKind,
486                                            const Instruction *I) {
487   if (CostKind == TTI::TCK_RecipThroughput &&
488       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
489     // FIXME: The vectorizer is highly sensitive to the cost of these
490     // instructions, which suggests that it may be using the costs incorrectly.
491     // But, for now, just make them free to avoid performance regressions for
492     // vector targets.
493     return 0;
494   }
495   return BaseT::getCFInstrCost(Opcode, CostKind, I);
496 }
497 
498 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
499                                              Type *Src,
500                                              TTI::CastContextHint CCH,
501                                              TTI::TargetCostKind CostKind,
502                                              const Instruction *I) {
503   int ISD = TLI->InstructionOpcodeToISD(Opcode);
504   assert(ISD && "Invalid opcode");
505 
506   // TODO: Allow non-throughput costs that aren't binary.
507   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
508     if (CostKind != TTI::TCK_RecipThroughput)
509       return Cost == 0 ? 0 : 1;
510     return Cost;
511   };
512   auto IsLegalFPType = [this](EVT VT) {
513     EVT EltVT = VT.getScalarType();
514     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
515             (EltVT == MVT::f64 && ST->hasFP64()) ||
516             (EltVT == MVT::f16 && ST->hasFullFP16());
517   };
518 
519   EVT SrcTy = TLI->getValueType(DL, Src);
520   EVT DstTy = TLI->getValueType(DL, Dst);
521 
522   if (!SrcTy.isSimple() || !DstTy.isSimple())
523     return AdjustCost(
524         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
525 
526   // Extending masked loads/truncating masked stores are expensive because we
527   // currently don't split them. This means that we'll likely end up
528   // loading/storing each element individually (hence the high cost).
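  // For example (illustrative): a masked load zero-extended from v8i8 to
  // v8i32 (256 bits, wider than one 128-bit MVE register) is costed below as
  // 2 * 8 * the MVE vector cost factor.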
529   if ((ST->hasMVEIntegerOps() &&
530        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
531         Opcode == Instruction::SExt)) ||
532       (ST->hasMVEFloatOps() &&
533        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
534        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
535     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
536       return 2 * DstTy.getVectorNumElements() *
537              ST->getMVEVectorCostFactor(CostKind);
538 
539   // The extend of other kinds of load is free
540   if (CCH == TTI::CastContextHint::Normal ||
541       CCH == TTI::CastContextHint::Masked) {
542     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
543         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
544         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
545         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
546         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
547         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
548         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
549         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
550         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
551         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
552         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
553         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
554         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
555     };
556     if (const auto *Entry = ConvertCostTableLookup(
557             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
558       return AdjustCost(Entry->Cost);
559 
560     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
561         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
562         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
563         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
564         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
565         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
566         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
567         // The following extend from a legal type to an illegal type, so we need
568         // to split the load. This introduces an extra load operation, but the
569         // extend is still "free".
570         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
571         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
572         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
573         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
574         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
575         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
576     };
577     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
578       if (const auto *Entry =
579               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
580                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
581         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
582     }
583 
584     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
585         // FPExtends are similar but also require the VCVT instructions.
586         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
587         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
588     };
589     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
590       if (const auto *Entry =
591               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
592                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
593         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
594     }
595 
596     // The truncate of a store is free. This is the mirror of extends above.
597     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
598         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
599         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
600         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
601         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
602         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
603         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
604         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
605     };
606     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
607       if (const auto *Entry =
608               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
609                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
610         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
611     }
612 
613     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
614         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
615         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
616     };
617     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
618       if (const auto *Entry =
619               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
620                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
621         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
622     }
623   }
624 
625   // NEON vector operations that can extend their inputs.
626   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
627       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
628     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
629       // vaddl
630       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
631       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
632       // vsubl
633       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
634       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
635       // vmull
636       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
637       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
638       // vshll
639       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
640       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
641     };
642 
643     auto *User = cast<Instruction>(*I->user_begin());
644     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
645     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
646                                              DstTy.getSimpleVT(),
647                                              SrcTy.getSimpleVT())) {
648       return AdjustCost(Entry->Cost);
649     }
650   }
651 
652   // Single to/from double precision conversions.
653   if (Src->isVectorTy() && ST->hasNEON() &&
654       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
655         DstTy.getScalarType() == MVT::f32) ||
656        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
657         DstTy.getScalarType() == MVT::f64))) {
658     static const CostTblEntry NEONFltDblTbl[] = {
659         // Vector fptrunc/fpext conversions.
660         {ISD::FP_ROUND, MVT::v2f64, 2},
661         {ISD::FP_EXTEND, MVT::v2f32, 2},
662         {ISD::FP_EXTEND, MVT::v4f32, 4}};
663 
664     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
665     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
666       return AdjustCost(LT.first * Entry->Cost);
667   }
668 
669   // Some arithmetic, load and store operations have specific instructions
670   // to cast up/down their types automatically at no extra cost.
671   // TODO: Get these tables to know at least what the related operations are.
672   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
673     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
674     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
675     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
676     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
677     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
678     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
679 
680     // The number of vmovl instructions for the extension.
681     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
682     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
683     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
684     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
685     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
686     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
687     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
688     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
689     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
690     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
691     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
692     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
693     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
694     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
695     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
696     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
697     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
698     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
699 
700     // Operations that we legalize using splitting.
701     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
702     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
703 
704     // Vector float <-> i32 conversions.
705     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
706     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
707 
708     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
709     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
710     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
711     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
712     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
713     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
714     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
715     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
716     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
717     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
718     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
719     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
720     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
721     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
722     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
723     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
724     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
725     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
726     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
727     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
728 
729     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
730     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
731     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
732     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
733     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
734     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
735 
736     // Vector double <-> i32 conversions.
737     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
738     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
739 
740     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
741     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
742     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
743     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
744     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
745     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
746 
747     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
748     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
749     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
750     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
751     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
752     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
753   };
754 
755   if (SrcTy.isVector() && ST->hasNEON()) {
756     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
757                                                    DstTy.getSimpleVT(),
758                                                    SrcTy.getSimpleVT()))
759       return AdjustCost(Entry->Cost);
760   }
761 
762   // Scalar float to integer conversions.
763   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
764     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
765     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
766     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
767     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
768     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
769     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
770     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
771     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
772     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
773     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
774     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
775     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
776     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
777     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
778     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
779     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
780     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
781     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
782     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
783     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
784   };
785   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
786     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
787                                                    DstTy.getSimpleVT(),
788                                                    SrcTy.getSimpleVT()))
789       return AdjustCost(Entry->Cost);
790   }
791 
792   // Scalar integer to float conversions.
793   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
794     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
795     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
796     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
797     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
798     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
799     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
800     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
801     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
802     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
803     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
804     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
805     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
806     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
807     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
808     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
809     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
810     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
811     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
812     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
813     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
814   };
815 
816   if (SrcTy.isInteger() && ST->hasNEON()) {
817     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
818                                                    ISD, DstTy.getSimpleVT(),
819                                                    SrcTy.getSimpleVT()))
820       return AdjustCost(Entry->Cost);
821   }
822 
823   // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
824   // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
825   // are linearised so take more.
826   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
827     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
828     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
829     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
830     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
831     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
832     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
833     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
834     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
835     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
836     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
837     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
838     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
839   };
840 
841   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
842     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
843                                                    ISD, DstTy.getSimpleVT(),
844                                                    SrcTy.getSimpleVT()))
845       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
846   }
847 
848   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
849     // As a general rule, fp conversions that were not matched above are
850     // scalarized and cost 1 vcvt for each lane, so long as the instruction is
851     // available. If not, they become a series of function calls.
852     const InstructionCost CallCost =
853         getCallInstrCost(nullptr, Dst, {Src}, CostKind);
854     int Lanes = 1;
855     if (SrcTy.isFixedLengthVector())
856       Lanes = SrcTy.getVectorNumElements();
857 
858     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
859       return Lanes;
860     else
861       return Lanes * CallCost;
862   }
863 
864   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
865       SrcTy.isFixedLengthVector()) {
866     // Treat a truncate with a larger-than-legal source (128 bits for MVE) as
867     // expensive, 2 instructions per lane.
868     if ((SrcTy.getScalarType() == MVT::i8 ||
869          SrcTy.getScalarType() == MVT::i16 ||
870          SrcTy.getScalarType() == MVT::i32) &&
871         SrcTy.getSizeInBits() > 128 &&
872         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
873       return SrcTy.getVectorNumElements() * 2;
874   }
875 
876   // Scalar integer conversion costs.
877   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
878     // i16 -> i64 requires two dependent operations.
879     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
880 
881     // Truncates on i64 are assumed to be free.
882     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
883     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
884     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
885     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
886   };
887 
888   if (SrcTy.isInteger()) {
889     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
890                                                    DstTy.getSimpleVT(),
891                                                    SrcTy.getSimpleVT()))
892       return AdjustCost(Entry->Cost);
893   }
894 
895   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
896                      ? ST->getMVEVectorCostFactor(CostKind)
897                      : 1;
898   return AdjustCost(
899       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
900 }
901 
902 InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
903                                                TTI::TargetCostKind CostKind,
904                                                unsigned Index, Value *Op0,
905                                                Value *Op1) {
906   // Penalize inserting into a D-subregister. We end up with a three times
907   // lower estimated throughput on Swift.
908   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
909       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
910     return 3;
911 
912   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
913                         Opcode == Instruction::ExtractElement)) {
914     // Cross-class copies are expensive on many microarchitectures,
915     // so assume they are expensive by default.
916     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
917       return 3;
918 
919     // Even if it's not a cross-class copy, this likely leads to mixing
920     // of NEON and VFP code and should therefore be penalized.
921     if (ValTy->isVectorTy() &&
922         ValTy->getScalarSizeInBits() <= 32)
923       return std::max<InstructionCost>(
924           BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1),
925           2U);
926   }
927 
928   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
929                                  Opcode == Instruction::ExtractElement)) {
930     // Integer cross-lane moves are more expensive than float, which can
931     // sometimes just be vmovs. Integers involve being passed to GPR registers,
932     // causing more of a delay.
933     std::pair<InstructionCost, MVT> LT =
934         getTypeLegalizationCost(ValTy->getScalarType());
935     return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
936   }
937 
938   return BaseT::getVectorInstrCost(Opcode, ValTy, CostKind, Index, Op0, Op1);
939 }
940 
941 InstructionCost ARMTTIImpl::getCmpSelInstrCost(
942     unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
943     TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
944     TTI::OperandValueInfo Op2Info, const Instruction *I) {
945   int ISD = TLI->InstructionOpcodeToISD(Opcode);
946 
947   // Thumb scalar code size cost for select.
948   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
949       ST->isThumb() && !ValTy->isVectorTy()) {
950     // Assume expensive structs.
951     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
952       return TTI::TCC_Expensive;
953 
954     // Select costs can vary because they:
955     // - may require one or more conditional mov (including an IT),
956     // - can't operate directly on immediates,
957     // - require live flags, which we can't copy around easily.
958     InstructionCost Cost = getTypeLegalizationCost(ValTy).first;
959 
960     // Possible IT instruction for Thumb2, or more for Thumb1.
961     ++Cost;
962 
963     // i1 values may need rematerialising by using mov immediates and/or
964     // flag setting instructions.
965     if (ValTy->isIntegerTy(1))
966       ++Cost;
967 
968     return Cost;
969   }
970 
971   // If this is a vector min/max/abs, use the cost of that intrinsic directly
972   // instead. Hopefully when min/max intrinsics are more prevalent this code
973   // will not be needed.
974   const Instruction *Sel = I;
975   if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
976       Sel->hasOneUse())
977     Sel = cast<Instruction>(Sel->user_back());
978   if (Sel && ValTy->isVectorTy() &&
979       (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
980     const Value *LHS, *RHS;
981     SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
982     unsigned IID = 0;
983     switch (SPF) {
984     case SPF_ABS:
985       IID = Intrinsic::abs;
986       break;
987     case SPF_SMIN:
988       IID = Intrinsic::smin;
989       break;
990     case SPF_SMAX:
991       IID = Intrinsic::smax;
992       break;
993     case SPF_UMIN:
994       IID = Intrinsic::umin;
995       break;
996     case SPF_UMAX:
997       IID = Intrinsic::umax;
998       break;
999     case SPF_FMINNUM:
1000       IID = Intrinsic::minnum;
1001       break;
1002     case SPF_FMAXNUM:
1003       IID = Intrinsic::maxnum;
1004       break;
1005     default:
1006       break;
1007     }
1008     if (IID) {
1009       // The ICmp is free, the select gets the cost of the min/max/etc
1010       if (Sel != I)
1011         return 0;
1012       IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
1013       return getIntrinsicInstrCost(CostAttrs, CostKind);
1014     }
1015   }
1016 
1017   // On NEON a vector select gets lowered to vbsl.
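  // (VBSL performs a bitwise select: each result bit is taken from one source
  // or the other according to the corresponding mask bit.)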
1018   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
1019     // Lowering of some vector selects is currently far from perfect.
1020     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
1021       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
1022       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
1023       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
1024     };
1025 
1026     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1027     EVT SelValTy = TLI->getValueType(DL, ValTy);
1028     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1029       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
1030                                                      SelCondTy.getSimpleVT(),
1031                                                      SelValTy.getSimpleVT()))
1032         return Entry->Cost;
1033     }
1034 
1035     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1036     return LT.first;
1037   }
1038 
1039   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
1040       (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
1041       cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
1042     FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
1043     FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
1044     if (!VecCondTy)
1045       VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
1046 
1047     // If we don't have mve.fp, any fp operations will need to be scalarized.
1048     if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
1049       // One scalarization insert, one scalarization extract, and the cost of the
1050       // fcmps.
1051       return BaseT::getScalarizationOverhead(VecValTy, /*Insert*/ false,
1052                                              /*Extract*/ true, CostKind) +
1053              BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1054                                              /*Extract*/ false, CostKind) +
1055              VecValTy->getNumElements() *
1056                  getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
1057                                     VecCondTy->getScalarType(), VecPred,
1058                                     CostKind, Op1Info, Op2Info, I);
1059     }
1060 
1061     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1062     int BaseCost = ST->getMVEVectorCostFactor(CostKind);
1063     // There are two types - the input that specifies the type of the compare
1064     // and the output vXi1 type. Because we don't know how the output will be
1065     // split, we may need an expensive shuffle to get the two in sync. This has
1066     // the effect of making larger-than-legal compares (v8i32, for example)
1067     // expensive.
1068     if (LT.second.isVector() && LT.second.getVectorNumElements() > 2) {
1069       if (LT.first > 1)
1070         return LT.first * BaseCost +
1071                BaseT::getScalarizationOverhead(VecCondTy, /*Insert*/ true,
1072                                                /*Extract*/ false, CostKind);
1073       return BaseCost;
1074     }
1075   }
1076 
1077   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1078   // for "multiple beats" potentially needed by MVE instructions.
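  // (MVE implementations may execute a 128-bit operation over several
  // architectural "beats"; getMVEVectorCostFactor accounts for this.)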
1079   int BaseCost = 1;
1080   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
1081     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1082 
1083   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred,
1084                                               CostKind, Op1Info, Op2Info, I);
1085 }
1086 
1087 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
1088                                                       ScalarEvolution *SE,
1089                                                       const SCEV *Ptr) {
1090   // Address computations in vectorized code with non-consecutive addresses will
1091   // likely result in more instructions compared to scalar code where the
1092   // computation can more often be merged into the index mode. The resulting
1093   // extra micro-ops can significantly decrease throughput.
1094   unsigned NumVectorInstToHideOverhead = 10;
1095   int MaxMergeDistance = 64;
1096 
1097   if (ST->hasNEON()) {
1098     if (Ty->isVectorTy() && SE &&
1099         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1100       return NumVectorInstToHideOverhead;
1101 
1102     // In many cases the address computation is not merged into the instruction
1103     // addressing mode.
1104     return 1;
1105   }
1106   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1107 }
1108 
1109 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1110   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1111     // If a VCTP is part of a chain, it's already profitable and shouldn't be
1112     // optimized, else LSR may block tail-predication.
1113     switch (II->getIntrinsicID()) {
1114     case Intrinsic::arm_mve_vctp8:
1115     case Intrinsic::arm_mve_vctp16:
1116     case Intrinsic::arm_mve_vctp32:
1117     case Intrinsic::arm_mve_vctp64:
1118       return true;
1119     default:
1120       break;
1121     }
1122   }
1123   return false;
1124 }
1125 
1126 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1127   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1128     return false;
1129 
1130   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1131     // Don't support v2i1 yet.
1132     if (VecTy->getNumElements() == 2)
1133       return false;
1134 
1135     // We don't support extending fp types.
1136     unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1137     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1138       return false;
1139   }
1140 
1141   unsigned EltWidth = DataTy->getScalarSizeInBits();
1142   return (EltWidth == 32 && Alignment >= 4) ||
1143          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1144 }
1145 
1146 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1147   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1148     return false;
1149 
1150   unsigned EltWidth = Ty->getScalarSizeInBits();
1151   return ((EltWidth == 32 && Alignment >= 4) ||
1152           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1153 }
1154 
1155 /// Given a memcpy/memset/memmove instruction, return the number of memory
1156 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1157 /// call is used.
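/// For example (illustrative): if findOptimalMemOpLowering picks four i32
/// operations for a 16-byte memcpy, this returns 4 * 2 = 8, since each type
/// is both loaded and stored.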
1158 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1159   MemOp MOp;
1160   unsigned DstAddrSpace = ~0u;
1161   unsigned SrcAddrSpace = ~0u;
1162   const Function *F = I->getParent()->getParent();
1163 
1164   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1165     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1166     // If 'size' is not a constant, a library call will be generated.
1167     if (!C)
1168       return -1;
1169 
1170     const unsigned Size = C->getValue().getZExtValue();
1171     const Align DstAlign = *MC->getDestAlign();
1172     const Align SrcAlign = *MC->getSourceAlign();
1173 
1174     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1175                       /*IsVolatile*/ false);
1176     DstAddrSpace = MC->getDestAddressSpace();
1177     SrcAddrSpace = MC->getSourceAddressSpace();
1178   }
1179   else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1180     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1181     // If 'size' is not a constant, a library call will be generated.
1182     if (!C)
1183       return -1;
1184 
1185     const unsigned Size = C->getValue().getZExtValue();
1186     const Align DstAlign = *MS->getDestAlign();
1187 
1188     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1189                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1190     DstAddrSpace = MS->getDestAddressSpace();
1191   }
1192   else
1193     llvm_unreachable("Expected a memcpy/move or memset!");
1194 
1195   unsigned Limit, Factor = 2;
1196   switch (I->getIntrinsicID()) {
1197     case Intrinsic::memcpy:
1198       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1199       break;
1200     case Intrinsic::memmove:
1201       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1202       break;
1203     case Intrinsic::memset:
1204       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1205       Factor = 1;
1206       break;
1207     default:
1208       llvm_unreachable("Expected a memcpy/move or memset!");
1209   }
1210 
1211   // MemOps will be populated with a list of data types that need to be
1212   // loaded and stored. That's why we multiply the number of elements by 2 to
1213   // get the cost for this memcpy.
1214   std::vector<EVT> MemOps;
1215   if (getTLI()->findOptimalMemOpLowering(
1216           MemOps, Limit, MOp, DstAddrSpace,
1217           SrcAddrSpace, F->getAttributes()))
1218     return MemOps.size() * Factor;
1219 
1220   // If we can't find an optimal memop lowering, return the default cost
1221   return -1;
1222 }
1223 
1224 InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1225   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1226 
1227   // To model the cost of a library call, we assume 1 for the call, and
1228   // 3 for the argument setup.
1229   if (NumOps == -1)
1230     return 4;
1231   return NumOps;
1232 }
1233 
1234 InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1235                                            VectorType *Tp, ArrayRef<int> Mask,
1236                                            TTI::TargetCostKind CostKind,
1237                                            int Index, VectorType *SubTp,
1238                                            ArrayRef<const Value *> Args,
1239                                            const Instruction *CxtI) {
1240   Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);
1241   // Treat extractsubvector as single op permutation.
1242   bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
1243   if (IsExtractSubvector)
1244     Kind = TTI::SK_PermuteSingleSrc;
1245   if (ST->hasNEON()) {
1246     if (Kind == TTI::SK_Broadcast) {
1247       static const CostTblEntry NEONDupTbl[] = {
1248           // VDUP handles these cases.
1249           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1250           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1251           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1252           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1253           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1254           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1255 
1256           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1257           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1258           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1259           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1260 
1261       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1262       if (const auto *Entry =
1263               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1264         return LT.first * Entry->Cost;
1265     }
1266     if (Kind == TTI::SK_Reverse) {
1267       static const CostTblEntry NEONShuffleTbl[] = {
1268           // Reverse shuffles cost one instruction if we are shuffling within a
1269           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1270           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1271           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1272           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1273           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1274           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1275           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1276 
1277           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1278           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1279           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1280           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1281 
1282       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1283       if (const auto *Entry =
1284               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1285         return LT.first * Entry->Cost;
1286     }
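         // For example, reversing a <4 x i32> legalizes to MVT::v4i32 and, per the
         // table above, costs 2 (a vrev plus a vext); a <2 x f32> reverse costs 1.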
1287     if (Kind == TTI::SK_Select) {
1288       static const CostTblEntry NEONSelShuffleTbl[] = {
1289           // Select shuffle cost table for ARM.
1290           // Cost is the number of instructions required to create the
1291           // shuffled vector.
1292 
1293           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1294           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1295           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1296           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1297 
1298           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1299           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1300           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1301 
1302           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1303 
1304           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1305 
1306       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1307       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1308                                               ISD::VECTOR_SHUFFLE, LT.second))
1309         return LT.first * Entry->Cost;
1310     }
1311   }
1312   if (ST->hasMVEIntegerOps()) {
1313     if (Kind == TTI::SK_Broadcast) {
1314       static const CostTblEntry MVEDupTbl[] = {
1315           // VDUP handles these cases.
1316           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1317           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1318           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1319           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1320           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1321 
1322       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1323       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1324                                               LT.second))
1325         return LT.first * Entry->Cost *
1326                ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1327     }
1328 
1329     if (!Mask.empty()) {
1330       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
1331       if (LT.second.isVector() &&
1332           Mask.size() <= LT.second.getVectorNumElements() &&
1333           (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1334            isVREVMask(Mask, LT.second, 64)))
1335         return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1336     }
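       // e.g. (illustrative) an <8 x i16> shuffle with mask <3,2,1,0,7,6,5,4> is a
       // VREV64 pattern and is costed above as a single MVE operation.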
1337   }
1338 
1339   // Restore optimal kind.
1340   if (IsExtractSubvector)
1341     Kind = TTI::SK_ExtractSubvector;
1342   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1343                      ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1344                      : 1;
1345   return BaseCost *
1346          BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
1347 }
1348 
1349 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1350     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1351     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
1352     ArrayRef<const Value *> Args,
1353     const Instruction *CxtI) {
1354   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1355   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1356     // Make operations on i1 relatively expensive as this often involves
1357     // combining predicates. AND and XOR should be easier to handle with IT
1358     // blocks.
1359     switch (ISDOpcode) {
1360     default:
1361       break;
1362     case ISD::AND:
1363     case ISD::XOR:
1364       return 2;
1365     case ISD::OR:
1366       return 3;
1367     }
1368   }
1369 
1370   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1371 
1372   if (ST->hasNEON()) {
1373     const unsigned FunctionCallDivCost = 20;
1374     const unsigned ReciprocalDivCost = 10;
1375     static const CostTblEntry CostTbl[] = {
1376       // Division.
1377       // These costs are somewhat arbitrary. Choose a cost of 20 to indicate that
1378       // vectorizing division (an added function call) is going to be very expensive.
1379       // Double register types.
1380       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1381       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1382       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1383       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1384       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1385       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1386       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1387       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1388       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1389       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1390       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1391       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1392       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1393       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1394       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1395       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1396       // Quad register types.
1397       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1398       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1399       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1400       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1401       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1402       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1403       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1404       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1405       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1406       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1407       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1408       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1409       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1410       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1411       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1412       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1413       // Multiplication.
1414     };
1415 
1416     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1417       return LT.first * Entry->Cost;
1418 
1419     InstructionCost Cost = BaseT::getArithmeticInstrCost(
1420         Opcode, Ty, CostKind, Op1Info, Op2Info);
1421 
1422     // This is somewhat of a hack. The problem we are facing is that SROA
1423     // creates sequences of shift/and/or instructions to construct values.
1424     // These sequences are recognized by ISel and have zero cost, but not so for
1425     // the vectorized code. Because we have support for v2i64 but not i64, those
1426     // sequences look particularly beneficial to vectorize.
1427     // To work around this we increase the cost of v2i64 operations to make them
1428     // seem less beneficial.
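       // (For instance, a shl+or chain that reassembles an i64 from two i32 halves
       // would otherwise look unrealistically cheap as a <2 x i64> operation.)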
1429     if (LT.second == MVT::v2i64 && Op2Info.isUniform() && Op2Info.isConstant())
1430       Cost += 4;
1431 
1432     return Cost;
1433   }
1434 
1435   // If this operation is a shift on arm/thumb2, it might well be folded into
1436   // the following instruction, hence having a cost of 0.
1437   auto LooksLikeAFreeShift = [&]() {
1438     if (ST->isThumb1Only() || Ty->isVectorTy())
1439       return false;
1440 
1441     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1442       return false;
1443     if (!Op2Info.isUniform() || !Op2Info.isConstant())
1444       return false;
1445 
1446     // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB.
1447     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1448     case Instruction::Add:
1449     case Instruction::Sub:
1450     case Instruction::And:
1451     case Instruction::Xor:
1452     case Instruction::Or:
1453     case Instruction::ICmp:
1454       return true;
1455     default:
1456       return false;
1457     }
1458   };
1459   if (LooksLikeAFreeShift())
1460     return 0;
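     // For example, in "add r0, r1, r2, lsl #2" the shift is folded into the
     // shifter operand of the add, so the shl itself costs nothing.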
1461 
1462   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1463   // for "multiple beats" potentially needed by MVE instructions.
1464   int BaseCost = 1;
1465   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1466     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1467 
1468   // The rest of this mostly follows what is done in
1469   // BaseT::getArithmeticInstrCost, without treating floats as more expensive
1470   // than scalars or increasing the costs for custom operations. The result is
1471   // also multiplied by the MVEVectorCostFactor where appropriate.
1472   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1473     return LT.first * BaseCost;
1474 
1475   // Otherwise this operation will be expanded; assume that we need to scalarize it.
1476   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1477     unsigned Num = VTy->getNumElements();
1478     InstructionCost Cost =
1479         getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
1480     // Return the cost of multiple scalar invocations plus the cost of
1481     // inserting and extracting the values.
1482     SmallVector<Type *> Tys(Args.size(), Ty);
1483     return BaseT::getScalarizationOverhead(VTy, Args, Tys, CostKind) +
1484            Num * Cost;
1485   }
1486 
1487   return BaseCost;
1488 }
1489 
1490 InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1491                                             MaybeAlign Alignment,
1492                                             unsigned AddressSpace,
1493                                             TTI::TargetCostKind CostKind,
1494                                             TTI::OperandValueInfo OpInfo,
1495                                             const Instruction *I) {
1496   // TODO: Handle other cost kinds.
1497   if (CostKind != TTI::TCK_RecipThroughput)
1498     return 1;
1499 
1500   // Type legalization can't handle structs
1501   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1502     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1503                                   CostKind);
1504 
1505   if (ST->hasNEON() && Src->isVectorTy() &&
1506       (Alignment && *Alignment != Align(16)) &&
1507       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1508     // Unaligned loads/stores are extremely inefficient.
1509     // We need 4 uops for vst1/vld1 vs 1 uop for vldr/vstr.
1510     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
1511     return LT.first * 4;
1512   }
1513 
1514   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1515   // Same for stores.
1516   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1517       ((Opcode == Instruction::Load && I->hasOneUse() &&
1518         isa<FPExtInst>(*I->user_begin())) ||
1519        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1520     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1521     Type *DstTy =
1522         Opcode == Instruction::Load
1523             ? (*I->user_begin())->getType()
1524             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1525     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1526         DstTy->getScalarType()->isFloatTy())
1527       return ST->getMVEVectorCostFactor(CostKind);
1528   }
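     // Illustrative IR for the load case (a sketch, not exhaustive):
     //   %l = load <4 x half>, ptr %p
     //   %e = fpext <4 x half> %l to <4 x float>
     // The pair is costed above as a single MVE operation.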
1529 
1530   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1531                      ? ST->getMVEVectorCostFactor(CostKind)
1532                      : 1;
1533   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1534                                            CostKind, OpInfo, I);
1535 }
1536 
1537 InstructionCost
1538 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1539                                   unsigned AddressSpace,
1540                                   TTI::TargetCostKind CostKind) {
1541   if (ST->hasMVEIntegerOps()) {
1542     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1543       return ST->getMVEVectorCostFactor(CostKind);
1544     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1545       return ST->getMVEVectorCostFactor(CostKind);
1546   }
1547   if (!isa<FixedVectorType>(Src))
1548     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1549                                         CostKind);
1550   // Scalar cost, which is currently very high due to the inefficiency of the
1551   // generated code.
1552   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1553 }
1554 
1555 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
1556     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1557     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1558     bool UseMaskForCond, bool UseMaskForGaps) {
1559   assert(Factor >= 2 && "Invalid interleave factor");
1560   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1561 
1562   // vldN/vstN don't support vector types with i64/f64 elements.
1563   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1564 
1565   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1566       !UseMaskForCond && !UseMaskForGaps) {
1567     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1568     auto *SubVecTy =
1569         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1570 
1571     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1572     // Accesses having vector types that are a multiple of 128 bits can be
1573     // matched to more than one vldN/vstN instruction.
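       // e.g. (illustrative) a factor-2 access of <8 x i32> has a 128-bit SubVecTy
       // of <4 x i32>, needing one vld2/vst2, so the cost below is 2 * BaseCost.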
1574     int BaseCost =
1575         ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1576     if (NumElts % Factor == 0 &&
1577         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1578       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1579 
1580     // Some smaller-than-legal interleaved patterns are cheap as we can make
1581     // use of the vmovn or vrev patterns to interleave a standard load. This is
1582     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1583     // promoted differently). The cost of 2 here is then a load and vrev or
1584     // vmovn.
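       // e.g. (illustrative) a factor-2 pattern on <8 x i8> has a 32-bit SubVecTy
       // of <4 x i8>, which fits the case below and is costed as 2 * BaseCost.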
1585     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1586         VecTy->isIntOrIntVectorTy() &&
1587         DL.getTypeSizeInBits(SubVecTy).getFixedValue() <= 64)
1588       return 2 * BaseCost;
1589   }
1590 
1591   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1592                                            Alignment, AddressSpace, CostKind,
1593                                            UseMaskForCond, UseMaskForGaps);
1594 }
1595 
1596 InstructionCost ARMTTIImpl::getGatherScatterOpCost(
1597     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1598     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1599   using namespace PatternMatch;
1600   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1601     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1602                                          Alignment, CostKind, I);
1603 
1604   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1605   auto *VTy = cast<FixedVectorType>(DataTy);
1606 
1607   // TODO: Splitting, once we do that.
1608 
1609   unsigned NumElems = VTy->getNumElements();
1610   unsigned EltSize = VTy->getScalarSizeInBits();
1611   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(DataTy);
1612 
1613   // For now, it is assumed that for the MVE gather instructions the loads are
1614   // all effectively serialised. This means the cost is the scalar cost
1615   // multiplied by the number of elements being loaded. This is possibly very
1616   // conservative, but even so we still end up vectorising loops because the
1617   // cost per iteration for many loops is lower than for scalar loops.
1618   InstructionCost VectorCost =
1619       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1620   // The scalarization cost should be a lot higher. We use the number of vector
1621   // elements plus the scalarization overhead. If masking is required then a lot
1622   // of little blocks will be needed and potentially a scalarized p0 mask,
1623   // greatly increasing the cost.
1624   InstructionCost ScalarCost =
1625       NumElems * LT.first + (VariableMask ? NumElems * 5 : 0) +
1626       BaseT::getScalarizationOverhead(VTy, /*Insert*/ true, /*Extract*/ false,
1627                                       CostKind) +
1628       BaseT::getScalarizationOverhead(VTy, /*Insert*/ false, /*Extract*/ true,
1629                                       CostKind);
1630 
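     // Illustration of the checks below: an aligned gather of <4 x i32> reaches
     // the ExtSize == 32 case and returns VectorCost, whereas a <4 x i64> gather
     // (256 bits) does not match and falls back to ScalarCost.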
1631   if (EltSize < 8 || Alignment < EltSize / 8)
1632     return ScalarCost;
1633 
1634   unsigned ExtSize = EltSize;
1635   // Check whether there's a single user that asks for an extended type
1636   if (I != nullptr) {
1637     // Depending on the caller of this function, a gather instruction will
1638     // either have opcode Instruction::Load or be a call to the masked_gather
1639     // intrinsic.
1640     if ((I->getOpcode() == Instruction::Load ||
1641          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1642         I->hasOneUse()) {
1643       const User *Us = *I->users().begin();
1644       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1645         // Only allow valid type combinations
1646         unsigned TypeSize =
1647             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1648         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1649              (TypeSize == 16 && EltSize == 8)) &&
1650             TypeSize * NumElems == 128) {
1651           ExtSize = TypeSize;
1652         }
1653       }
1654     }
1655     // Check whether the input data needs to be truncated
1656     TruncInst *T;
1657     if ((I->getOpcode() == Instruction::Store ||
1658          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1659         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1660       // Only allow valid type combinations
1661       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1662       if (((EltSize == 16 && TypeSize == 32) ||
1663            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1664           TypeSize * NumElems == 128)
1665         ExtSize = TypeSize;
1666     }
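       // e.g. (illustrative) a gather of <8 x i8> whose only use is a zext to
       // <8 x i16> sets ExtSize to 16, so the access can remain a vector gather
       // provided the GEP checks below also pass.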
1667   }
1668 
1669   if (ExtSize * NumElems != 128 || NumElems < 4)
1670     return ScalarCost;
1671 
1672   // Any (aligned) i32 gather will not need to be scalarised.
1673   if (ExtSize == 32)
1674     return VectorCost;
1675   // For smaller types, we need to ensure that the gep's inputs are correctly
1676   // extended from a small enough value. Other sizes (including i64) are
1677   // scalarized for now.
1678   if (ExtSize != 8 && ExtSize != 16)
1679     return ScalarCost;
1680 
1681   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1682     Ptr = BC->getOperand(0);
1683   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1684     if (GEP->getNumOperands() != 2)
1685       return ScalarCost;
1686     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1687     // Scale needs to be correct (which is only relevant for i16s).
1688     if (Scale != 1 && Scale * 8 != ExtSize)
1689       return ScalarCost;
1690     // And we need to zext (not sext) the indexes from a small enough type.
1691     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1692       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1693         return VectorCost;
1694     }
1695     return ScalarCost;
1696   }
1697   return ScalarCost;
1698 }
1699 
1700 InstructionCost
1701 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1702                                        std::optional<FastMathFlags> FMF,
1703                                        TTI::TargetCostKind CostKind) {
1704 
1705   EVT ValVT = TLI->getValueType(DL, ValTy);
1706   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1707   unsigned EltSize = ValVT.getScalarSizeInBits();
1708 
1709   // In general floating point reductions are a series of elementwise
1710   // operations, with free extracts on each step. These are either in-order or
1711   // treewise depending on whether that is allowed by the fast math flags.
1712   if ((ISD == ISD::FADD || ISD == ISD::FMUL) &&
1713       ((EltSize == 32 && ST->hasVFP2Base()) ||
1714        (EltSize == 64 && ST->hasFP64()) ||
1715        (EltSize == 16 && ST->hasFullFP16()))) {
1716     unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1717     unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1718     InstructionCost VecCost = 0;
1719     while (!TTI::requiresOrderedReduction(FMF) && isPowerOf2_32(NumElts) &&
1720            NumElts * EltSize > VecLimit) {
1721       Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1722       VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1723       NumElts /= 2;
1724     }
1725 
1726     // For fp16 we need to extract the upper lane elements. MVE can add a
1727     // VREV+FMIN/MAX to perform another vector step instead.
1728     InstructionCost ExtractCost = 0;
1729     if (!TTI::requiresOrderedReduction(FMF) && ST->hasMVEFloatOps() &&
1730         ValVT.getVectorElementType() == MVT::f16 && NumElts == 8) {
1731       VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1732       NumElts /= 2;
1733     } else if (ValVT.getVectorElementType() == MVT::f16)
1734       ExtractCost = NumElts / 2;
1735 
1736     return VecCost + ExtractCost +
1737            NumElts *
1738                getArithmeticInstrCost(Opcode, ValTy->getElementType(), CostKind);
1739   }
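     // For illustration, a fast-math fadd reduction of <8 x float> under MVE is
     // costed above as one vector fadd on <4 x float> (halving to the 128-bit
     // limit) plus four scalar fadds for the remaining lanes.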
1740 
1741   if ((ISD == ISD::AND || ISD == ISD::OR || ISD == ISD::XOR) &&
1742       (EltSize == 64 || EltSize == 32 || EltSize == 16 || EltSize == 8)) {
1743     unsigned NumElts = cast<FixedVectorType>(ValTy)->getNumElements();
1744     unsigned VecLimit =
1745         ST->hasMVEIntegerOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1746     InstructionCost VecCost = 0;
1747     while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1748       Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts / 2);
1749       VecCost += getArithmeticInstrCost(Opcode, VecTy, CostKind);
1750       NumElts /= 2;
1751     }
1752     // For i16/i8, MVE will perform a VREV + VORR/VAND/VEOR for the 64bit vector
1753     // step.
1754     if (ST->hasMVEIntegerOps() && ValVT.getScalarSizeInBits() <= 16 &&
1755         NumElts * EltSize == 64) {
1756       Type *VecTy = FixedVectorType::get(ValTy->getElementType(), NumElts);
1757       VecCost += ST->getMVEVectorCostFactor(CostKind) +
1758                  getArithmeticInstrCost(Opcode, VecTy, CostKind);
1759       NumElts /= 2;
1760     }
1761 
1762     // From here we extract the elements and perform the and/or/xor.
1763     InstructionCost ExtractCost = NumElts;
1764     return VecCost + ExtractCost +
1765            (NumElts - 1) * getArithmeticInstrCost(
1766                                Opcode, ValTy->getElementType(), CostKind);
1767   }
1768 
1769   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD ||
1770       TTI::requiresOrderedReduction(FMF))
1771     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1772 
1773   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1774 
1775   static const CostTblEntry CostTblAdd[]{
1776       {ISD::ADD, MVT::v16i8, 1},
1777       {ISD::ADD, MVT::v8i16, 1},
1778       {ISD::ADD, MVT::v4i32, 1},
1779   };
1780   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1781     return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1782 
1783   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1784 }
1785 
1786 InstructionCost ARMTTIImpl::getExtendedReductionCost(
1787     unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *ValTy,
1788     FastMathFlags FMF, TTI::TargetCostKind CostKind) {
1789   EVT ValVT = TLI->getValueType(DL, ValTy);
1790   EVT ResVT = TLI->getValueType(DL, ResTy);
1791 
1792   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1793 
1794   switch (ISD) {
1795   case ISD::ADD:
1796     if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1797       std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1798 
1799       // The legal cases are:
1800       //   VADDV u/s 8/16/32
1801       //   VADDLV u/s 32
1802       // Codegen currently cannot always handle larger than legal vectors very
1803       // well, especially for predicated reductions where the mask needs to be
1804       // split, so restrict to 128bit or smaller input types.
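           // e.g. (illustrative) vecreduce.add of a <16 x i8> input zero-extended
           // to i32 maps to VADDV.U8 and is costed as one MVE operation.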
1805       unsigned RevVTSize = ResVT.getSizeInBits();
1806       if (ValVT.getSizeInBits() <= 128 &&
1807           ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1808            (LT.second == MVT::v8i16 && RevVTSize <= 32) ||
1809            (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1810         return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1811     }
1812     break;
1813   default:
1814     break;
1815   }
1816   return BaseT::getExtendedReductionCost(Opcode, IsUnsigned, ResTy, ValTy, FMF,
1817                                          CostKind);
1818 }
1819 
1820 InstructionCost
1821 ARMTTIImpl::getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
1822                                    VectorType *ValTy,
1823                                    TTI::TargetCostKind CostKind) {
1824   EVT ValVT = TLI->getValueType(DL, ValTy);
1825   EVT ResVT = TLI->getValueType(DL, ResTy);
1826 
1827   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1828     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
1829 
1830     // The legal cases are:
1831     //   VMLAV u/s 8/16/32
1832     //   VMLALV u/s 16/32
1833     // Codegen currently cannot always handle larger than legal vectors very
1834     // well, especially for predicated reductions where the mask needs to be
1835     // split, so restrict to 128bit or smaller input types.
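       // e.g. (illustrative) vecreduce.add(mul(zext <8 x i16>, zext <8 x i16>))
       // producing an i32 or i64 result maps to VMLAV.U16 / VMLALV.U16 and is
       // costed as one MVE operation.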
1836     unsigned RevVTSize = ResVT.getSizeInBits();
1837     if (ValVT.getSizeInBits() <= 128 &&
1838         ((LT.second == MVT::v16i8 && RevVTSize <= 32) ||
1839          (LT.second == MVT::v8i16 && RevVTSize <= 64) ||
1840          (LT.second == MVT::v4i32 && RevVTSize <= 64)))
1841       return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1842   }
1843 
1844   return BaseT::getMulAccReductionCost(IsUnsigned, ResTy, ValTy, CostKind);
1845 }
1846 
1847 InstructionCost
1848 ARMTTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
1849                                    FastMathFlags FMF,
1850                                    TTI::TargetCostKind CostKind) {
1851   EVT ValVT = TLI->getValueType(DL, Ty);
1852 
1853   // In general floating point reductions are a series of elementwise
1854   // operations, with free extracts on each step. These are either in-order or
1855   // treewise depending on whether that is allowed by the fast math flags.
1856   if ((IID == Intrinsic::minnum || IID == Intrinsic::maxnum) &&
1857       ((ValVT.getVectorElementType() == MVT::f32 && ST->hasVFP2Base()) ||
1858        (ValVT.getVectorElementType() == MVT::f64 && ST->hasFP64()) ||
1859        (ValVT.getVectorElementType() == MVT::f16 && ST->hasFullFP16()))) {
1860     unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
1861     unsigned EltSize = ValVT.getScalarSizeInBits();
1862     unsigned VecLimit = ST->hasMVEFloatOps() ? 128 : (ST->hasNEON() ? 64 : -1);
1863     InstructionCost VecCost;
1864     while (isPowerOf2_32(NumElts) && NumElts * EltSize > VecLimit) {
1865       Type *VecTy = FixedVectorType::get(Ty->getElementType(), NumElts/2);
1866       IntrinsicCostAttributes ICA(IID, VecTy, {VecTy, VecTy}, FMF);
1867       VecCost += getIntrinsicInstrCost(ICA, CostKind);
1868       NumElts /= 2;
1869     }
1870 
1871     // For fp16 we need to extract the upper lane elements. MVE can add a
1872     // VREV+FMIN/MAX to perform another vector step instead.
1873     InstructionCost ExtractCost = 0;
1874     if (ST->hasMVEFloatOps() && ValVT.getVectorElementType() == MVT::f16 &&
1875         NumElts == 8) {
1876       VecCost += ST->getMVEVectorCostFactor(CostKind) * 2;
1877       NumElts /= 2;
1878     } else if (ValVT.getVectorElementType() == MVT::f16)
1879       ExtractCost = cast<FixedVectorType>(Ty)->getNumElements() / 2;
1880 
1881     IntrinsicCostAttributes ICA(IID, Ty->getElementType(),
1882                                 {Ty->getElementType(), Ty->getElementType()},
1883                                 FMF);
1884     return VecCost + ExtractCost +
1885            (NumElts - 1) * getIntrinsicInstrCost(ICA, CostKind);
1886   }
1887 
1888   if (IID == Intrinsic::smin || IID == Intrinsic::smax ||
1889       IID == Intrinsic::umin || IID == Intrinsic::umax) {
1890     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
1891 
1892     // All costs are the same for u/s min/max. These lower to vminv/vmaxv, which
1893     // are given a slightly higher cost as they tend to take multiple cycles for
1894     // smaller type sizes.
1895     static const CostTblEntry CostTblAdd[]{
1896         {ISD::SMIN, MVT::v16i8, 4},
1897         {ISD::SMIN, MVT::v8i16, 3},
1898         {ISD::SMIN, MVT::v4i32, 2},
1899     };
1900     if (const auto *Entry = CostTableLookup(CostTblAdd, ISD::SMIN, LT.second))
1901       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1902   }
1903 
1904   return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
1905 }
1906 
1907 InstructionCost
1908 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1909                                   TTI::TargetCostKind CostKind) {
1910   unsigned Opc = ICA.getID();
1911   switch (Opc) {
1912   case Intrinsic::get_active_lane_mask:
1913     // Currently we make a somewhat optimistic assumption that
1914     // active_lane_masks are always free. In reality one may be freely folded
1915     // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
1916     // of add/icmp code. We may need to improve this in the future, but being
1917     // able to detect if it is free or not involves looking at a lot of other
1918     // code. We currently assume that the vectorizer inserted these, and knew
1919     // what it was doing in adding one.
1920     if (ST->hasMVEIntegerOps())
1921       return 0;
1922     break;
1923   case Intrinsic::sadd_sat:
1924   case Intrinsic::ssub_sat:
1925   case Intrinsic::uadd_sat:
1926   case Intrinsic::usub_sat: {
1927     bool IsAdd = (Opc == Intrinsic::sadd_sat || Opc == Intrinsic::ssub_sat);
1928     bool IsSigned = (Opc == Intrinsic::sadd_sat || Opc == Intrinsic::ssub_sat);
1929     Type *RetTy = ICA.getReturnType();
1930 
1931     if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1932       if (IsSigned && ST->hasDSP() && ITy->getBitWidth() == 32)
1933         return 1; // qadd / qsub
1934       if (ST->hasDSP() && (ITy->getBitWidth() == 8 || ITy->getBitWidth() == 16))
1935         return 2; // uqadd16 / qadd16 / uqsub16 / qsub16 + possible extend.
1936       // Otherwise return the cost of expanding the node. Generally an add +
1937       // icmp + sel.
1938       CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
1939       Type *CondTy = RetTy->getWithNewBitWidth(1);
1940       return getArithmeticInstrCost(IsAdd ? Instruction::Add : Instruction::Sub,
1941                                     RetTy, CostKind) +
1942              2 * getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy, Pred,
1943                                     CostKind) +
1944              2 * getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy, Pred,
1945                                     CostKind);
1946     }
1947 
1948     if (!ST->hasMVEIntegerOps())
1949       break;
1950 
1951     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);
1952     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1953         LT.second == MVT::v16i8) {
1954       // This is a base cost of 1 for the vqadd, plus 3 extract shifts if we
1955       // need to extend the type, as it uses shr(qadd(shl, shl)).
1956       unsigned Instrs =
1957           LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1
1958                                                                           : 4;
1959       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1960     }
1961     break;
1962   }
1963   case Intrinsic::abs:
1964   case Intrinsic::smin:
1965   case Intrinsic::smax:
1966   case Intrinsic::umin:
1967   case Intrinsic::umax: {
1968     if (!ST->hasMVEIntegerOps())
1969       break;
1970     Type *VT = ICA.getReturnType();
1971 
1972     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1973     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1974         LT.second == MVT::v16i8)
1975       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1976     break;
1977   }
1978   case Intrinsic::minnum:
1979   case Intrinsic::maxnum: {
1980     if (!ST->hasMVEFloatOps())
1981       break;
1982     Type *VT = ICA.getReturnType();
1983     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(VT);
1984     if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1985       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1986     break;
1987   }
1988   case Intrinsic::fptosi_sat:
1989   case Intrinsic::fptoui_sat: {
1990     if (ICA.getArgTypes().empty())
1991       break;
1992     bool IsSigned = Opc == Intrinsic::fptosi_sat;
1993     auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
1994     EVT MTy = TLI->getValueType(DL, ICA.getReturnType());
1995     // Check for the legal types, with the correct subtarget features.
1996     if ((ST->hasVFP2Base() && LT.second == MVT::f32 && MTy == MVT::i32) ||
1997         (ST->hasFP64() && LT.second == MVT::f64 && MTy == MVT::i32) ||
1998         (ST->hasFullFP16() && LT.second == MVT::f16 && MTy == MVT::i32))
1999       return LT.first;
2000 
2001     // Equally for MVE vector types
2002     if (ST->hasMVEFloatOps() &&
2003         (LT.second == MVT::v4f32 || LT.second == MVT::v8f16) &&
2004         LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits())
2005       return LT.first * ST->getMVEVectorCostFactor(CostKind);
2006 
2007     // If we can, use a legal convert followed by a min+max.
2008     if (((ST->hasVFP2Base() && LT.second == MVT::f32) ||
2009          (ST->hasFP64() && LT.second == MVT::f64) ||
2010          (ST->hasFullFP16() && LT.second == MVT::f16) ||
2011          (ST->hasMVEFloatOps() &&
2012           (LT.second == MVT::v4f32 || LT.second == MVT::v8f16))) &&
2013         LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
2014       Type *LegalTy = Type::getIntNTy(ICA.getReturnType()->getContext(),
2015                                       LT.second.getScalarSizeInBits());
2016       InstructionCost Cost =
2017           LT.second.isVector() ? ST->getMVEVectorCostFactor(CostKind) : 1;
2018       IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin
2019                                               : Intrinsic::umin,
2020                                      LegalTy, {LegalTy, LegalTy});
2021       Cost += getIntrinsicInstrCost(Attrs1, CostKind);
2022       IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax
2023                                               : Intrinsic::umax,
2024                                      LegalTy, {LegalTy, LegalTy});
2025       Cost += getIntrinsicInstrCost(Attrs2, CostKind);
2026       return LT.first * Cost;
2027     }
2028     // Otherwise we need to follow the default expansion that clamps the value
2029     // using a float min/max with an fcmp+sel for NaN handling when signed.
2030     Type *FPTy = ICA.getArgTypes()[0];
2031     Type *RetTy = ICA.getReturnType();
2032     IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FPTy, {FPTy, FPTy});
2033     InstructionCost Cost = getIntrinsicInstrCost(Attrs1, CostKind);
2034     IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FPTy, {FPTy, FPTy});
2035     Cost += getIntrinsicInstrCost(Attrs2, CostKind);
2036     Cost +=
2037         getCastInstrCost(IsSigned ? Instruction::FPToSI : Instruction::FPToUI,
2038                          RetTy, FPTy, TTI::CastContextHint::None, CostKind);
2039     if (IsSigned) {
2040       Type *CondTy = RetTy->getWithNewBitWidth(1);
2041       Cost += getCmpSelInstrCost(BinaryOperator::FCmp, FPTy, CondTy,
2042                                  CmpInst::FCMP_UNO, CostKind);
2043       Cost += getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
2044                                  CmpInst::FCMP_UNO, CostKind);
2045     }
2046     return Cost;
2047   }
2048   }
2049 
2050   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2051 }
2052 
2053 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
2054   if (!F->isIntrinsic())
2055     return BaseT::isLoweredToCall(F);
2056 
2057   // Assume all Arm-specific intrinsics map to an instruction.
2058   if (F->getName().starts_with("llvm.arm"))
2059     return false;
2060 
2061   switch (F->getIntrinsicID()) {
2062   default: break;
2063   case Intrinsic::powi:
2064   case Intrinsic::sin:
2065   case Intrinsic::cos:
2066   case Intrinsic::pow:
2067   case Intrinsic::log:
2068   case Intrinsic::log10:
2069   case Intrinsic::log2:
2070   case Intrinsic::exp:
2071   case Intrinsic::exp2:
2072     return true;
2073   case Intrinsic::sqrt:
2074   case Intrinsic::fabs:
2075   case Intrinsic::copysign:
2076   case Intrinsic::floor:
2077   case Intrinsic::ceil:
2078   case Intrinsic::trunc:
2079   case Intrinsic::rint:
2080   case Intrinsic::nearbyint:
2081   case Intrinsic::round:
2082   case Intrinsic::canonicalize:
2083   case Intrinsic::lround:
2084   case Intrinsic::llround:
2085   case Intrinsic::lrint:
2086   case Intrinsic::llrint:
2087     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
2088       return true;
2089     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
2090       return true;
2091     // Some operations can be handled by vector instructions and assume
2092     // unsupported vectors will be expanded into supported scalar ones.
2093     // TODO Handle scalar operations properly.
2094     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
2095   case Intrinsic::masked_store:
2096   case Intrinsic::masked_load:
2097   case Intrinsic::masked_gather:
2098   case Intrinsic::masked_scatter:
2099     return !ST->hasMVEIntegerOps();
2100   case Intrinsic::sadd_with_overflow:
2101   case Intrinsic::uadd_with_overflow:
2102   case Intrinsic::ssub_with_overflow:
2103   case Intrinsic::usub_with_overflow:
2104   case Intrinsic::sadd_sat:
2105   case Intrinsic::uadd_sat:
2106   case Intrinsic::ssub_sat:
2107   case Intrinsic::usub_sat:
2108     return false;
2109   }
2110 
2111   return BaseT::isLoweredToCall(F);
2112 }
2113 
2114 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
2115   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
2116   EVT VT = TLI->getValueType(DL, I.getType(), true);
2117   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
2118     return true;
2119 
2120   // Check if an intrinsic will be lowered to a call and assume that any
2121   // other CallInst will generate a bl.
2122   if (auto *Call = dyn_cast<CallInst>(&I)) {
2123     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
2124       switch(II->getIntrinsicID()) {
2125         case Intrinsic::memcpy:
2126         case Intrinsic::memset:
2127         case Intrinsic::memmove:
2128           return getNumMemOps(II) == -1;
2129         default:
2130           if (const Function *F = Call->getCalledFunction())
2131             return isLoweredToCall(F);
2132       }
2133     }
2134     return true;
2135   }
2136 
2137   // FPv5 provides conversions between integer, double-precision,
2138   // single-precision, and half-precision formats.
2139   switch (I.getOpcode()) {
2140   default:
2141     break;
2142   case Instruction::FPToSI:
2143   case Instruction::FPToUI:
2144   case Instruction::SIToFP:
2145   case Instruction::UIToFP:
2146   case Instruction::FPTrunc:
2147   case Instruction::FPExt:
2148     return !ST->hasFPARMv8Base();
2149   }
2150 
2151   // FIXME: Unfortunately the approach of checking the Operation Action does
2152   // not catch all cases of Legalization that use library calls. Our
2153   // Legalization step categorizes some transformations that become library
2154   // calls as Custom, Expand or even Legal when doing type legalization. So for
2155   // now we have to special-case, for instance, the SDIV of 64-bit integers and
2156   // the use of floating point emulation.
2157   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
2158     switch (ISD) {
2159     default:
2160       break;
2161     case ISD::SDIV:
2162     case ISD::UDIV:
2163     case ISD::SREM:
2164     case ISD::UREM:
2165     case ISD::SDIVREM:
2166     case ISD::UDIVREM:
2167       return true;
2168     }
2169   }
2170 
2171   // Assume all other non-float operations are supported.
2172   if (!VT.isFloatingPoint())
2173     return false;
2174 
2175   // We'll need a library call to handle most floats when using soft-float.
2176   if (TLI->useSoftFloat()) {
2177     switch (I.getOpcode()) {
2178     default:
2179       return true;
2180     case Instruction::Alloca:
2181     case Instruction::Load:
2182     case Instruction::Store:
2183     case Instruction::Select:
2184     case Instruction::PHI:
2185       return false;
2186     }
2187   }
2188 
2189   // We'll need a libcall to perform double precision operations on a single
2190   // precision only FPU.
2191   if (I.getType()->isDoubleTy() && !ST->hasFP64())
2192     return true;
2193 
2194   // Likewise for half precision arithmetic.
2195   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
2196     return true;
2197 
2198   return false;
2199 }
2200 
2201 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
2202                                           AssumptionCache &AC,
2203                                           TargetLibraryInfo *LibInfo,
2204                                           HardwareLoopInfo &HWLoopInfo) {
2205   // Low-overhead branches are only supported in the 'low-overhead branch'
2206   // extension of v8.1-m.
2207   if (!ST->hasLOB() || DisableLowOverheadLoops) {
2208     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
2209     return false;
2210   }
2211 
2212   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
2213     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
2214     return false;
2215   }
2216 
2217   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
2218   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
2219     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
2220     return false;
2221   }
2222 
2223   const SCEV *TripCountSCEV =
2224     SE.getAddExpr(BackedgeTakenCount,
2225                   SE.getOne(BackedgeTakenCount->getType()));
2226 
2227   // We need to store the trip count in LR, a 32-bit register.
2228   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
2229     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
2230     return false;
2231   }
2232 
2233   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
2234   // point in generating a hardware loop if that's going to happen.
2235 
2236   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
2237     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
2238       switch (Call->getIntrinsicID()) {
2239       default:
2240         break;
2241       case Intrinsic::start_loop_iterations:
2242       case Intrinsic::test_start_loop_iterations:
2243       case Intrinsic::loop_decrement:
2244       case Intrinsic::loop_decrement_reg:
2245         return true;
2246       }
2247     }
2248     return false;
2249   };
2250 
2251   // Scan the instructions to see if there's any that we know will turn into a
2252   // call or if this loop is already a low-overhead loop or will become a tail
2253   // predicated loop.
2254   bool IsTailPredLoop = false;
2255   auto ScanLoop = [&](Loop *L) {
2256     for (auto *BB : L->getBlocks()) {
2257       for (auto &I : *BB) {
2258         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
2259             isa<InlineAsm>(I)) {
2260           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
2261           return false;
2262         }
2263         if (auto *II = dyn_cast<IntrinsicInst>(&I))
2264           IsTailPredLoop |=
2265               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
2266               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
2267               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
2268               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
2269               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
2270       }
2271     }
2272     return true;
2273   };
2274 
2275   // Visit inner loops.
2276   for (auto *Inner : *L)
2277     if (!ScanLoop(Inner))
2278       return false;
2279 
2280   if (!ScanLoop(L))
2281     return false;
2282 
2283   // TODO: Check whether the trip count calculation is expensive. If L is the
2284   // inner loop but we know it has a low trip count, calculating that trip
2285   // count (in the parent loop) may be detrimental.
2286 
2287   LLVMContext &C = L->getHeader()->getContext();
2288   HWLoopInfo.CounterInReg = true;
2289   HWLoopInfo.IsNestingLegal = false;
2290   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
2291   HWLoopInfo.CountType = Type::getInt32Ty(C);
2292   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
2293   return true;
2294 }
2295 
2296 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
2297   // We don't allow icmps, and because we only look at single-block loops,
2298   // we simply count the icmps, i.e. there should only be 1 for the backedge.
2299   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
2300     return false;
2301   // FIXME: This is a workaround for poor cost modelling. Min/Max intrinsics are
2302   // not currently canonical, but soon will be. Code without them uses icmp, and
2303   // so is not tail-predicated as per the condition above. In order to get the
2304   // same performance we treat min and max the same as an icmp for tail-pred
2305   // purposes for the moment (we often rely on non-tail-pred and higher VFs to
2306   // pick more optimal instructions like VQDMULH. They need to be recognized
2307   // directly by the vectorizer).
2308   if (auto *II = dyn_cast<IntrinsicInst>(&I))
2309     if ((II->getIntrinsicID() == Intrinsic::smin ||
2310          II->getIntrinsicID() == Intrinsic::smax ||
2311          II->getIntrinsicID() == Intrinsic::umin ||
2312          II->getIntrinsicID() == Intrinsic::umax) &&
2313         ++ICmpCount > 1)
2314       return false;
2315 
2316   if (isa<FCmpInst>(&I))
2317     return false;
2318 
2319   // We could allow extending/narrowing FP loads/stores, but codegen is
2320   // too inefficient so reject this for now.
2321   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
2322     return false;
2323 
2324   // Extends have to be extending-loads
2325   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
2326     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
2327       return false;
2328 
2329   // Truncs have to be narrowing-stores
2330   if (isa<TruncInst>(&I) )
2331     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
2332       return false;
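     // For example, "zext i8 %v to i32" where %v comes from a single-use load can
     // become an extending masked load and is allowed, whereas an extend of an
     // arbitrary value would change the element size mid-loop and is rejected.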
2333 
2334   return true;
2335 }
2336 
2337 // To set up a tail-predicated loop, we need to know the total number of
2338 // elements processed by that loop. Thus, we need to determine the element
2339 // size and:
2340 // 1) it should be uniform for all operations in the vector loop, so we
2341 //    e.g. don't want any widening/narrowing operations.
2342 // 2) it should be smaller than i64s because we don't have vector operations
2343 //    that work on i64s.
2344 // 3) we don't want elements to be reversed or shuffled, to make sure the
2345 //    tail-predication masks/predicates the right lanes.
2346 //
2347 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
2348                                  const DataLayout &DL,
2349                                  const LoopAccessInfo *LAI) {
2350   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
2351 
2352   // If there are live-out values, it is probably a reduction. We can predicate
2353   // most reduction operations freely under MVE using a combination of
2354   // prefer-predicated-reduction-select and inloop reductions. We limit this to
2355   // floating point and integer reductions, but don't check for operators
2356   // specifically here. If the value ends up not being a reduction (and so the
2357   // vectorizer cannot tailfold the loop), we should fall back to standard
2358   // vectorization automatically.
2359   SmallVector< Instruction *, 8 > LiveOuts;
2360   LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
2361   bool ReductionsDisabled =
2362       EnableTailPredication == TailPredication::EnabledNoReductions ||
2363       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
2364 
2365   for (auto *I : LiveOuts) {
2366     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
2367         !I->getType()->isHalfTy()) {
2368       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
2369                            "live-out value\n");
2370       return false;
2371     }
2372     if (ReductionsDisabled) {
2373       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
2374       return false;
2375     }
2376   }
2377 
2378   // Next, check that all instructions can be tail-predicated.
2379   PredicatedScalarEvolution PSE = LAI->getPSE();
2380   SmallVector<Instruction *, 16> LoadStores;
2381   int ICmpCount = 0;
2382 
2383   for (BasicBlock *BB : L->blocks()) {
2384     for (Instruction &I : BB->instructionsWithoutDebug()) {
2385       if (isa<PHINode>(&I))
2386         continue;
2387       if (!canTailPredicateInstruction(I, ICmpCount)) {
2388         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
2389         return false;
2390       }
2391 
2392       Type *T  = I.getType();
2393       if (T->getScalarSizeInBits() > 32) {
2394         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
2395         return false;
2396       }
2397       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
2398         Value *Ptr = getLoadStorePointerOperand(&I);
2399         Type *AccessTy = getLoadStoreType(&I);
2400         int64_t NextStride = getPtrStride(PSE, AccessTy, Ptr, L).value_or(0);
2401         if (NextStride == 1) {
2402           // TODO: for now only allow consecutive strides of 1. We could support
2403           // other strides as long as it is uniform, but let's keep it simple
2404           // for now.
2405           continue;
2406         } else if (NextStride == -1 ||
2407                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
2408                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
2409           LLVM_DEBUG(dbgs()
2410                      << "Reversed or interleaved strides of 2/4 found, "
2411                         "vld2/vld4 can't be tail-predicated.\n");
2412           return false;
2413           // TODO: don't tail predicate if there is a reversed load?
2414         } else if (EnableMaskedGatherScatters) {
2415           // Gather/scatters do allow loading from arbitrary strides, at
2416           // least if they are loop invariant.
2417           // TODO: Loop variant strides should in theory work, too, but
2418           // this requires further testing.
2419           const SCEV *PtrScev = PSE.getSE()->getSCEV(Ptr);
2420           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
2421             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
2422             if (PSE.getSE()->isLoopInvariant(Step, L))
2423               continue;
2424           }
2425         }
2426         LLVM_DEBUG(dbgs() << "Bad stride found, can't "
2427                              "tail-predicate.\n");
2428         return false;
2429       }
2430     }
2431   }
2432 
2433   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
2434   return true;
2435 }
2436 
2437 bool ARMTTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
2438   if (!EnableTailPredication) {
2439     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
2440     return false;
2441   }
2442 
2443   // Creating a predicated vector loop is the first step for generating a
2444   // tail-predicated hardware loop, for which we need the MVE masked
2445   // load/stores instructions:
2446   if (!ST->hasMVEIntegerOps())
2447     return false;
2448 
2449   LoopVectorizationLegality *LVL = TFI->LVL;
2450   Loop *L = LVL->getLoop();
2451 
2452   // For now, restrict this to single block loops.
2453   if (L->getNumBlocks() > 1) {
2454     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2455                          "loop.\n");
2456     return false;
2457   }
2458 
2459   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2460 
2461   LoopInfo *LI = LVL->getLoopInfo();
2462   HardwareLoopInfo HWLoopInfo(L);
2463   if (!HWLoopInfo.canAnalyze(*LI)) {
2464     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2465                          "analyzable.\n");
2466     return false;
2467   }
2468 
2469   AssumptionCache *AC = LVL->getAssumptionCache();
2470   ScalarEvolution *SE = LVL->getScalarEvolution();
2471 
2472   // This checks if we have the low-overhead branch architecture
2473   // extension, and if we will create a hardware-loop:
2474   if (!isHardwareLoopProfitable(L, *SE, *AC, TFI->TLI, HWLoopInfo)) {
2475     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2476                          "profitable.\n");
2477     return false;
2478   }
2479 
2480   DominatorTree *DT = LVL->getDominatorTree();
2481   if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT)) {
2482     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2483                          "a candidate.\n");
2484     return false;
2485   }
2486 
2487   return canTailPredicateLoop(L, LI, *SE, DL, LVL->getLAI());
2488 }
2489 
2490 TailFoldingStyle
2491 ARMTTIImpl::getPreferredTailFoldingStyle(bool IVUpdateMayOverflow) const {
2492   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2493     return TailFoldingStyle::DataWithoutLaneMask;
2494 
2495   // Intrinsic @llvm.get.active.lane.mask is supported.
2496   // It is used in the MVETailPredication pass, which requires the number of
2497   // elements processed by this vector loop to set up the tail-predicated
2498   // loop.
2499   return TailFoldingStyle::Data;
2500 }
2501 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2502                                          TTI::UnrollingPreferences &UP,
2503                                          OptimizationRemarkEmitter *ORE) {
2504   // Enable upper-bound unrolling universally, provided that we do not see an
2505   // active lane mask, which is better kept as a loop to become tail
2506   // predicated than to be conditionally unrolled.
2507   UP.UpperBound =
2508       !ST->hasMVEIntegerOps() || !any_of(*L->getHeader(), [](Instruction &I) {
2509         return isa<IntrinsicInst>(I) &&
2510                cast<IntrinsicInst>(I).getIntrinsicID() ==
2511                    Intrinsic::get_active_lane_mask;
2512       });
2513 
2514   // Only currently enable these preferences for M-Class cores.
2515   if (!ST->isMClass())
2516     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
2517 
2518   // Disable loop unrolling for Oz and Os.
2519   UP.OptSizeThreshold = 0;
2520   UP.PartialOptSizeThreshold = 0;
2521   if (L->getHeader()->getParent()->hasOptSize())
2522     return;
2523 
2524   SmallVector<BasicBlock*, 4> ExitingBlocks;
2525   L->getExitingBlocks(ExitingBlocks);
2526   LLVM_DEBUG(dbgs() << "Loop has:\n"
2527                     << "Blocks: " << L->getNumBlocks() << "\n"
2528                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2529 
2530   // Only allow one exit other than the latch. This acts as an early exit,
2531   // as it mirrors the profitability calculation of the runtime unroller.
2532   if (ExitingBlocks.size() > 2)
2533     return;
2534 
2535   // Limit the CFG of the loop body for targets with a branch predictor.
2536   // Allowing 4 blocks permits if-then-else diamonds in the body.
2537   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2538     return;
2539 
2540   // Don't unroll vectorized loops, including the remainder loop.
2541   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2542     return;
2543 
2544   // Scan the loop: don't unroll loops with calls as this could prevent
2545   // inlining.
2546   InstructionCost Cost = 0;
2547   for (auto *BB : L->getBlocks()) {
2548     for (auto &I : *BB) {
2549       // Don't unroll vectorised loops; MVE does not benefit from unrolling
2550       // them as much as scalar code does.
2551       if (I.getType()->isVectorTy())
2552         return;
2553 
2554       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2555         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2556           if (!isLoweredToCall(F))
2557             continue;
2558         }
2559         return;
2560       }
2561 
2562       SmallVector<const Value*, 4> Operands(I.operand_values());
2563       Cost += getInstructionCost(&I, Operands,
2564                                  TargetTransformInfo::TCK_SizeAndLatency);
2565     }
2566   }
2567 
2568   // On v6m cores, there are very few registers available. We can easily end up
2569   // spilling and reloading more registers in an unrolled loop. Look at the
2570   // number of LCSSA phis as a rough measure of how many registers will need to
2571   // be live out of the loop, reducing the default unroll count if more than
2572   // one value is needed. In the long run, all of this should be learnt by a
2573   // machine.
2574   unsigned UnrollCount = 4;
2575   if (ST->isThumb1Only()) {
2576     unsigned ExitingValues = 0;
2577     SmallVector<BasicBlock *, 4> ExitBlocks;
2578     L->getExitBlocks(ExitBlocks);
2579     for (auto *Exit : ExitBlocks) {
2580       // Count the number of LCSSA phis. Exclude values coming from GEPs, as
2581       // only the last one is expected to be needed for address operands.
2582       unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) {
2583         return PH.getNumOperands() != 1 ||
2584                !isa<GetElementPtrInst>(PH.getOperand(0));
2585       });
2586       ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues;
2587     }
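    // For example, two live-out values halve the default unroll count of 4 to
    // 2, while three or more return early without enabling runtime unrolling.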
2588     if (ExitingValues)
2589       UnrollCount /= ExitingValues;
2590     if (UnrollCount <= 1)
2591       return;
2592   }
2593 
2594   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2595   LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n");
2596 
2597   UP.Partial = true;
2598   UP.Runtime = true;
2599   UP.UnrollRemainder = true;
2600   UP.DefaultUnrollRuntimeCount = UnrollCount;
2601   UP.UnrollAndJam = true;
2602   UP.UnrollAndJamInnerLoopThreshold = 60;
2603 
2604   // Forcing the unrolling of small loops can be very useful because of the
2605   // branch-taken cost of the backedge.
2606   if (Cost < 12)
2607     UP.Force = true;
2608 }
2609 
2610 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2611                                        TTI::PeelingPreferences &PP) {
2612   BaseT::getPeelingPreferences(L, SE, PP);
2613 }
2614 
2615 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2616                                        TTI::ReductionFlags Flags) const {
2617   if (!ST->hasMVEIntegerOps())
2618     return false;
2619 
2620   unsigned ScalarBits = Ty->getScalarSizeInBits();
2621   switch (Opcode) {
2622   case Instruction::Add:
2623     return ScalarBits <= 64;
2624   default:
2625     return false;
2626   }
2627 }
2628 
2629 bool ARMTTIImpl::preferPredicatedReductionSelect(
2630     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
2631   if (!ST->hasMVEIntegerOps())
2632     return false;
2633   return true;
2634 }
2635 
2636 InstructionCost ARMTTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
2637                                                  StackOffset BaseOffset,
2638                                                  bool HasBaseReg, int64_t Scale,
2639                                                  unsigned AddrSpace) const {
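  // Build the candidate addressing mode and ask the target whether it is
  // legal; an unsupported combination is reported as a negative cost.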
2640   TargetLoweringBase::AddrMode AM;
2641   AM.BaseGV = BaseGV;
2642   AM.BaseOffs = BaseOffset.getFixed();
2643   AM.HasBaseReg = HasBaseReg;
2644   AM.Scale = Scale;
2645   AM.ScalableOffset = BaseOffset.getScalable();
2646   if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace)) {
2647     if (ST->hasFPAO())
2648       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
2649     return 0;
2650   }
2651   return -1;
2652 }
2653 
2654 bool ARMTTIImpl::hasArmWideBranch(bool Thumb) const {
2655   if (Thumb) {
2656     // B.W is available in any Thumb2-supporting target, and also in every
2657     // version of Armv8-M, even Baseline which does not include the rest of
2658     // Thumb2.
2659     return ST->isThumb2() || ST->hasV8MBaselineOps();
2660   } else {
2661     // B is available in all versions of the Arm ISA, so the only question is
2662     // whether that ISA is available at all.
2663     return ST->hasARMOps();
2664   }
2665 }
2666 
2667 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
2668 /// of the vector elements.
2669 static bool areExtractExts(Value *Ext1, Value *Ext2) {
2670   using namespace PatternMatch;
2671 
2672   auto areExtDoubled = [](Instruction *Ext) {
2673     return Ext->getType()->getScalarSizeInBits() ==
2674            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
2675   };
2676 
2677   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
2678       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
2679       !areExtDoubled(cast<Instruction>(Ext1)) ||
2680       !areExtDoubled(cast<Instruction>(Ext2)))
2681     return false;
2682 
2683   return true;
2684 }
2685 
2686 /// Check if sinking \p I's operands to I's basic block is profitable, because
2687 /// the operands can be folded into a target instruction, e.g.
2688 /// sext/zext can be folded into vsubl.
2689 bool ARMTTIImpl::isProfitableToSinkOperands(Instruction *I,
2690                                             SmallVectorImpl<Use *> &Ops) const {
2691   using namespace PatternMatch;
2692 
2693   if (!I->getType()->isVectorTy())
2694     return false;
2695 
2696   if (ST->hasNEON()) {
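    // For NEON, sink the sext/zext operands of an add/sub so that they can be
    // folded into a single widening instruction such as vaddl/vsubl.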
2697     switch (I->getOpcode()) {
2698     case Instruction::Sub:
2699     case Instruction::Add: {
2700       if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
2701         return false;
2702       Ops.push_back(&I->getOperandUse(0));
2703       Ops.push_back(&I->getOperandUse(1));
2704       return true;
2705     }
2706     default:
2707       return false;
2708     }
2709   }
2710 
2711   if (!ST->hasMVEIntegerOps())
2712     return false;
2713 
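  // Recognise multiplies that will form a fused multiply-subtract: an fmul
  // whose only use is the subtrahend of an fsub, or an @llvm.fma call with a
  // negated operand. IsSinker below declines to sink splats into these.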
2714   auto IsFMSMul = [&](Instruction *I) {
2715     if (!I->hasOneUse())
2716       return false;
2717     auto *Sub = cast<Instruction>(*I->users().begin());
2718     return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
2719   };
2720   auto IsFMS = [&](Instruction *I) {
2721     if (match(I->getOperand(0), m_FNeg(m_Value())) ||
2722         match(I->getOperand(1), m_FNeg(m_Value())))
2723       return true;
2724     return false;
2725   };
2726 
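  // IsSinker returns true if a splat used as I's Operand'th operand could be
  // folded into I, for example as the scalar register operand of an MVE
  // arithmetic instruction, rather than being kept in a vector register.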
2727   auto IsSinker = [&](Instruction *I, int Operand) {
2728     switch (I->getOpcode()) {
2729     case Instruction::Add:
2730     case Instruction::Mul:
2731     case Instruction::FAdd:
2732     case Instruction::ICmp:
2733     case Instruction::FCmp:
2734       return true;
2735     case Instruction::FMul:
2736       return !IsFMSMul(I);
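    // For these non-commutative operations, only a splat feeding the second
    // operand can be folded into the instruction.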
2737     case Instruction::Sub:
2738     case Instruction::FSub:
2739     case Instruction::Shl:
2740     case Instruction::LShr:
2741     case Instruction::AShr:
2742       return Operand == 1;
2743     case Instruction::Call:
2744       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2745         switch (II->getIntrinsicID()) {
2746         case Intrinsic::fma:
2747           return !IsFMS(I);
2748         case Intrinsic::sadd_sat:
2749         case Intrinsic::uadd_sat:
2750         case Intrinsic::arm_mve_add_predicated:
2751         case Intrinsic::arm_mve_mul_predicated:
2752         case Intrinsic::arm_mve_qadd_predicated:
2753         case Intrinsic::arm_mve_vhadd:
2754         case Intrinsic::arm_mve_hadd_predicated:
2755         case Intrinsic::arm_mve_vqdmull:
2756         case Intrinsic::arm_mve_vqdmull_predicated:
2757         case Intrinsic::arm_mve_vqdmulh:
2758         case Intrinsic::arm_mve_qdmulh_predicated:
2759         case Intrinsic::arm_mve_vqrdmulh:
2760         case Intrinsic::arm_mve_qrdmulh_predicated:
2761         case Intrinsic::arm_mve_fma_predicated:
2762           return true;
2763         case Intrinsic::ssub_sat:
2764         case Intrinsic::usub_sat:
2765         case Intrinsic::arm_mve_sub_predicated:
2766         case Intrinsic::arm_mve_qsub_predicated:
2767         case Intrinsic::arm_mve_hsub_predicated:
2768         case Intrinsic::arm_mve_vhsub:
2769           return Operand == 1;
2770         default:
2771           return false;
2772         }
2773       }
2774       return false;
2775     default:
2776       return false;
2777     }
2778   };
2779 
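  // Look through each operand of I for a splat (possibly behind a bitcast)
  // that is profitable to sink, provided every user of the splat can also
  // fold it.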
2780   for (auto OpIdx : enumerate(I->operands())) {
2781     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
2782     // Make sure we are not already sinking this operand.
2783     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
2784       continue;
2785 
2786     Instruction *Shuffle = Op;
2787     if (Shuffle->getOpcode() == Instruction::BitCast)
2788       Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
2789     // We are looking for a splat that can be sunk.
2790     if (!Shuffle || !match(Shuffle, m_Shuffle(m_InsertElt(m_Undef(), m_Value(),
2791                                                           m_ZeroInt()),
2792                                               m_Undef(), m_ZeroMask())))
2793       continue;
2794     if (!IsSinker(I, OpIdx.index()))
2795       continue;
2796 
2797     // All uses of the shuffle should be sunk to avoid duplicating it across
2798     // GPR and vector registers.
2799     for (Use &U : Op->uses()) {
2800       Instruction *Insn = cast<Instruction>(U.getUser());
2801       if (!IsSinker(Insn, U.getOperandNo()))
2802         return false;
2803     }
2804 
2805     Ops.push_back(&Shuffle->getOperandUse(0));
2806     if (Shuffle != Op)
2807       Ops.push_back(&Op->getOperandUse(0));
2808     Ops.push_back(&OpIdx.value());
2809   }
2810   return true;
2811 }
2812 
2813 unsigned ARMTTIImpl::getNumBytesToPadGlobalArray(unsigned Size,
2814                                                  Type *ArrayType) const {
2815   if (!UseWidenGlobalArrays) {
2816     LLVM_DEBUG(dbgs() << "Padding global arrays disabled\n");
2817     return 0;
2818   }
2819 
2820   // Don't modify non-integer array types.
2821   if (!ArrayType || !ArrayType->isArrayTy() ||
2822       !ArrayType->getArrayElementType()->isIntegerTy())
2823     return 0;
2824 
2825   // We pad to 4-byte boundaries.
2826   if (Size % 4 == 0)
2827     return 0;
2828 
2829   unsigned NumBytesToPad = 4 - (Size % 4);
2830   unsigned NewSize = Size + NumBytesToPad;
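  // For example, a 6-byte array is padded with 2 bytes, giving a NewSize of 8.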
2831 
2832   // The maximum number of bytes for which a memcpy is lowered to loads and
2833   // stores rather than the library function (__aeabi_memcpy).
2834   unsigned MaxMemIntrinsicSize = getMaxMemIntrinsicInlineSizeThreshold();
2835 
2836   if (NewSize > MaxMemIntrinsicSize)
2837     return 0;
2838 
2839   return NumBytesToPad;
2840 }
2841