1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40
41 using namespace llvm;
42
43 #define DEBUG_TYPE "armtti"
44
45 static cl::opt<bool> EnableMaskedLoadStores(
46 "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47 cl::desc("Enable the generation of masked loads and stores"));
48
49 static cl::opt<bool> DisableLowOverheadLoops(
50 "disable-arm-loloops", cl::Hidden, cl::init(false),
51 cl::desc("Disable the generation of low-overhead loops"));
52
53 static cl::opt<bool>
54 AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55 cl::desc("Enable the generation of WLS loops"));
56
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67 InstCombiner::BuilderTy &Builder) {
68 auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69
70 if (!IntrAlign)
71 return nullptr;
72
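// Use whichever is larger: the intrinsic's declared alignment or the
// alignment we were told the memory has.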
73 unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74 ? MemAlign
75 : IntrAlign->getLimitedValue();
76
77 if (!isPowerOf2_32(Alignment))
78 return nullptr;
79
80 auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81 PointerType::get(II.getType(), 0));
82 return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86 const Function *Callee) const {
87 const TargetMachine &TM = getTLI()->getTargetMachine();
88 const FeatureBitset &CallerBits =
89 TM.getSubtargetImpl(*Caller)->getFeatureBits();
90 const FeatureBitset &CalleeBits =
91 TM.getSubtargetImpl(*Callee)->getFeatureBits();
92
93 // To inline a callee, all features not in the allowed list must match exactly.
94 bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95 (CalleeBits & ~InlineFeaturesAllowed);
96 // For features in the allowed list, the callee's features must be a subset of
97 // the caller's.
98 bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99 (CalleeBits & InlineFeaturesAllowed);
100 return MatchExact && MatchSubset;
101 }
102
103 TTI::AddressingModeKind
104 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
105 ScalarEvolution *SE) const {
106 if (ST->hasMVEIntegerOps())
107 return TTI::AMK_PostIndexed;
108
109 if (L->getHeader()->getParent()->hasOptSize())
110 return TTI::AMK_None;
111
112 if (ST->isMClass() && ST->isThumb2() &&
113 L->getNumBlocks() == 1)
114 return TTI::AMK_PreIndexed;
115
116 return TTI::AMK_None;
117 }
118
119 Optional<Instruction *>
120 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
121 using namespace PatternMatch;
122 Intrinsic::ID IID = II.getIntrinsicID();
123 switch (IID) {
124 default:
125 break;
126 case Intrinsic::arm_neon_vld1: {
127 Align MemAlign =
128 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
129 &IC.getAssumptionCache(), &IC.getDominatorTree());
130 if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
131 return IC.replaceInstUsesWith(II, V);
132 }
133 break;
134 }
135
136 case Intrinsic::arm_neon_vld2:
137 case Intrinsic::arm_neon_vld3:
138 case Intrinsic::arm_neon_vld4:
139 case Intrinsic::arm_neon_vld2lane:
140 case Intrinsic::arm_neon_vld3lane:
141 case Intrinsic::arm_neon_vld4lane:
142 case Intrinsic::arm_neon_vst1:
143 case Intrinsic::arm_neon_vst2:
144 case Intrinsic::arm_neon_vst3:
145 case Intrinsic::arm_neon_vst4:
146 case Intrinsic::arm_neon_vst2lane:
147 case Intrinsic::arm_neon_vst3lane:
148 case Intrinsic::arm_neon_vst4lane: {
149 Align MemAlign =
150 getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
151 &IC.getAssumptionCache(), &IC.getDominatorTree());
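// If the pointer is known to be better aligned than the intrinsic's
// alignment argument claims, raise the argument to the known alignment.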
152 unsigned AlignArg = II.getNumArgOperands() - 1;
153 Value *AlignArgOp = II.getArgOperand(AlignArg);
154 MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
155 if (Align && *Align < MemAlign) {
156 return IC.replaceOperand(
157 II, AlignArg,
158 ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
159 false));
160 }
161 break;
162 }
163
164 case Intrinsic::arm_mve_pred_i2v: {
165 Value *Arg = II.getArgOperand(0);
166 Value *ArgArg;
167 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
168 PatternMatch::m_Value(ArgArg))) &&
169 II.getType() == ArgArg->getType()) {
170 return IC.replaceInstUsesWith(II, ArgArg);
171 }
172 Constant *XorMask;
173 if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
174 PatternMatch::m_Value(ArgArg)),
175 PatternMatch::m_Constant(XorMask))) &&
176 II.getType() == ArgArg->getType()) {
177 if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
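// i2v(v2i(x) ^ 0xffff) flips all 16 predicate lanes, which is just a
// vector-level xor of x with an all-true splat.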
178 if (CI->getValue().trunc(16).isAllOnesValue()) {
179 auto TrueVector = IC.Builder.CreateVectorSplat(
180 cast<FixedVectorType>(II.getType())->getNumElements(),
181 IC.Builder.getTrue());
182 return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
183 }
184 }
185 }
186 KnownBits ScalarKnown(32);
187 if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
188 ScalarKnown, 0)) {
189 return &II;
190 }
191 break;
192 }
193 case Intrinsic::arm_mve_pred_v2i: {
194 Value *Arg = II.getArgOperand(0);
195 Value *ArgArg;
196 if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
197 PatternMatch::m_Value(ArgArg)))) {
198 return IC.replaceInstUsesWith(II, ArgArg);
199 }
200 if (!II.getMetadata(LLVMContext::MD_range)) {
201 Type *IntTy32 = Type::getInt32Ty(II.getContext());
202 Metadata *M[] = {
203 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
204 ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
205 II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
206 return &II;
207 }
208 break;
209 }
210 case Intrinsic::arm_mve_vadc:
211 case Intrinsic::arm_mve_vadc_predicated: {
212 unsigned CarryOp =
213 (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
214 assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
215 "Bad type for intrinsic!");
216
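// Only the carry flag, kept in bit 29 (FPSCR.C) of the carry-in word, is
// actually read, so the remaining bits can be treated as undemanded.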
217 KnownBits CarryKnown(32);
218 if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
219 CarryKnown)) {
220 return &II;
221 }
222 break;
223 }
224 case Intrinsic::arm_mve_vmldava: {
225 Instruction *I = cast<Instruction>(&II);
226 if (I->hasOneUse()) {
227 auto *User = cast<Instruction>(*I->user_begin());
228 Value *OpZ;
229 if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
230 match(I->getOperand(3), m_Zero())) {
231 Value *OpX = I->getOperand(4);
232 Value *OpY = I->getOperand(5);
233 Type *OpTy = OpX->getType();
234
235 IC.Builder.SetInsertPoint(User);
236 Value *V =
237 IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
238 {I->getOperand(0), I->getOperand(1),
239 I->getOperand(2), OpZ, OpX, OpY});
240
241 IC.replaceInstUsesWith(*User, V);
242 return IC.eraseInstFromFunction(*User);
243 }
244 }
245 return None;
246 }
247 }
248 return None;
249 }
250
251 InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
252 TTI::TargetCostKind CostKind) {
253 assert(Ty->isIntegerTy());
254
255 unsigned Bits = Ty->getPrimitiveSizeInBits();
256 if (Bits == 0 || Imm.getActiveBits() >= 64)
257 return 4;
258
259 int64_t SImmVal = Imm.getSExtValue();
260 uint64_t ZImmVal = Imm.getZExtValue();
261 if (!ST->isThumb()) {
262 if ((SImmVal >= 0 && SImmVal < 65536) ||
263 (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
264 (ARM_AM::getSOImmVal(~ZImmVal) != -1))
265 return 1;
266 return ST->hasV6T2Ops() ? 2 : 3;
267 }
268 if (ST->isThumb2()) {
269 if ((SImmVal >= 0 && SImmVal < 65536) ||
270 (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
271 (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
272 return 1;
273 return ST->hasV6T2Ops() ? 2 : 3;
274 }
275 // Thumb1: any i8 immediate costs 1.
276 if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
277 return 1;
278 if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
279 return 2;
280 // Load from constantpool.
281 return 3;
282 }
283
284 // Constants smaller than 256 fit in the immediate field of
285 // Thumb1 instructions, so we return a zero cost and 1 otherwise.
286 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
287 const APInt &Imm, Type *Ty) {
288 if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
289 return 0;
290
291 return 1;
292 }
293
294 // Checks whether Inst is part of a min(max()) or max(min()) pattern
295 // that will be matched to an SSAT instruction.
296 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
297 Value *LHS, *RHS;
298 ConstantInt *C;
299 SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
300
301 if (InstSPF == SPF_SMAX &&
302 PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
303 C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
304
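// SSAT clamps to [-2^(N-1), 2^(N-1)-1], so the smax constant must be a
// negative power of two and the paired smin constant must be (-Imm) - 1.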
305 auto isSSatMin = [&](Value *MinInst) {
306 if (isa<SelectInst>(MinInst)) {
307 Value *MinLHS, *MinRHS;
308 ConstantInt *MinC;
309 SelectPatternFlavor MinSPF =
310 matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
311 if (MinSPF == SPF_SMIN &&
312 PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
313 MinC->getValue() == ((-Imm) - 1))
314 return true;
315 }
316 return false;
317 };
318
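// The matching smin can either feed this smax as an operand or consume it
// as one of its users.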
319 if (isSSatMin(Inst->getOperand(1)) ||
320 (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
321 isSSatMin(*(++Inst->user_begin())))))
322 return true;
323 }
324 return false;
325 }
326
327 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
328 const APInt &Imm, Type *Ty,
329 TTI::TargetCostKind CostKind,
330 Instruction *Inst) {
331 // Division by a constant can be turned into multiplication, but only if we
332 // know it's constant. So it's not so much that the immediate is cheap (it's
333 // not), but that the alternative is worse.
334 // FIXME: this is probably unneeded with GlobalISel.
335 if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
336 Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
337 Idx == 1)
338 return 0;
339
340 if (Opcode == Instruction::And) {
341 // UXTB/UXTH
342 if (Imm == 255 || Imm == 65535)
343 return 0;
344 // Conversion to BIC is free, and means we can use ~Imm instead.
345 return std::min(getIntImmCost(Imm, Ty, CostKind),
346 getIntImmCost(~Imm, Ty, CostKind));
347 }
348
349 if (Opcode == Instruction::Add)
350 // Conversion to SUB is free, and means we can use -Imm instead.
351 return std::min(getIntImmCost(Imm, Ty, CostKind),
352 getIntImmCost(-Imm, Ty, CostKind));
353
354 if (Opcode == Instruction::ICmp && Imm.isNegative() &&
355 Ty->getIntegerBitWidth() == 32) {
356 int64_t NegImm = -Imm.getSExtValue();
357 if (ST->isThumb2() && NegImm < 1<<12)
358 // icmp X, #-C -> cmn X, #C
359 return 0;
360 if (ST->isThumb() && NegImm < 1<<8)
361 // icmp X, #-C -> adds X, #C
362 return 0;
363 }
364
365 // xor a, -1 can always be folded to MVN
366 if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
367 return 0;
368
369 // Ensure that negative constants of min(max()) or max(min()) patterns that
370 // match to SSAT instructions don't get hoisted.
371 if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
372 Ty->getIntegerBitWidth() <= 32) {
373 if (isSSATMinMaxPattern(Inst, Imm) ||
374 (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
375 isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
376 return 0;
377 }
378
379 return getIntImmCost(Imm, Ty, CostKind);
380 }
381
382 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
383 TTI::TargetCostKind CostKind,
384 const Instruction *I) {
385 if (CostKind == TTI::TCK_RecipThroughput &&
386 (ST->hasNEON() || ST->hasMVEIntegerOps())) {
387 // FIXME: The vectorizer is highly sensitive to the cost of these
388 // instructions, which suggests that it may be using the costs incorrectly.
389 // But, for now, just make them free to avoid performance regressions for
390 // vector targets.
391 return 0;
392 }
393 return BaseT::getCFInstrCost(Opcode, CostKind, I);
394 }
395
396 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
397 Type *Src,
398 TTI::CastContextHint CCH,
399 TTI::TargetCostKind CostKind,
400 const Instruction *I) {
401 int ISD = TLI->InstructionOpcodeToISD(Opcode);
402 assert(ISD && "Invalid opcode");
403
404 // TODO: Allow non-throughput costs that aren't binary.
405 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
406 if (CostKind != TTI::TCK_RecipThroughput)
407 return Cost == 0 ? 0 : 1;
408 return Cost;
409 };
410 auto IsLegalFPType = [this](EVT VT) {
411 EVT EltVT = VT.getScalarType();
412 return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
413 (EltVT == MVT::f64 && ST->hasFP64()) ||
414 (EltVT == MVT::f16 && ST->hasFullFP16());
415 };
416
417 EVT SrcTy = TLI->getValueType(DL, Src);
418 EVT DstTy = TLI->getValueType(DL, Dst);
419
420 if (!SrcTy.isSimple() || !DstTy.isSimple())
421 return AdjustCost(
422 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
423
424 // Extending masked loads and truncating masked stores are expensive because we
425 // currently don't split them. This means that we'll likely end up
426 // loading/storing each element individually (hence the high cost).
427 if ((ST->hasMVEIntegerOps() &&
428 (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
429 Opcode == Instruction::SExt)) ||
430 (ST->hasMVEFloatOps() &&
431 (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
432 IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
433 if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
434 return 2 * DstTy.getVectorNumElements() *
435 ST->getMVEVectorCostFactor(CostKind);
436
437 // The extension of other kinds of loads is free.
438 if (CCH == TTI::CastContextHint::Normal ||
439 CCH == TTI::CastContextHint::Masked) {
440 static const TypeConversionCostTblEntry LoadConversionTbl[] = {
441 {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
442 {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
443 {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
444 {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
445 {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
446 {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
447 {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
448 {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
449 {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
450 {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
451 {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
452 {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
453 };
454 if (const auto *Entry = ConvertCostTableLookup(
455 LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
456 return AdjustCost(Entry->Cost);
457
458 static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
459 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
460 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
461 {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
462 {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
463 {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
464 {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
465 // The following extend from a legal type to an illegal type, so the load
466 // needs to be split. This introduces an extra load operation, but the
467 // extend is still "free".
468 {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
469 {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
470 {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
471 {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
472 {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
473 {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
474 };
475 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
476 if (const auto *Entry =
477 ConvertCostTableLookup(MVELoadConversionTbl, ISD,
478 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
479 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
480 }
481
482 static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
483 // FPExtends are similar but also require the VCVT instructions.
484 {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
485 {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
486 };
487 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
488 if (const auto *Entry =
489 ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
490 DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
491 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
492 }
493
494 // The truncate of a store is free. This is the mirror of extends above.
495 static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
496 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
497 {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
498 {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
499 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
500 {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
501 {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
502 {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
503 };
504 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
505 if (const auto *Entry =
506 ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
507 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
508 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
509 }
510
511 static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
512 {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
513 {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
514 };
515 if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
516 if (const auto *Entry =
517 ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
518 SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
519 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
520 }
521 }
522
523 // NEON vector operations that can extend their inputs.
524 if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
525 I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
526 static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
527 // vaddl
528 { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
529 { ISD::ADD, MVT::v8i16, MVT::v8i8, 0 },
530 // vsubl
531 { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
532 { ISD::SUB, MVT::v8i16, MVT::v8i8, 0 },
533 // vmull
534 { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
535 { ISD::MUL, MVT::v8i16, MVT::v8i8, 0 },
536 // vshll
537 { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
538 { ISD::SHL, MVT::v8i16, MVT::v8i8, 0 },
539 };
540
541 auto *User = cast<Instruction>(*I->user_begin());
542 int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
543 if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
544 DstTy.getSimpleVT(),
545 SrcTy.getSimpleVT())) {
546 return AdjustCost(Entry->Cost);
547 }
548 }
549
550 // Single to/from double precision conversions.
551 if (Src->isVectorTy() && ST->hasNEON() &&
552 ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
553 DstTy.getScalarType() == MVT::f32) ||
554 (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
555 DstTy.getScalarType() == MVT::f64))) {
556 static const CostTblEntry NEONFltDblTbl[] = {
557 // Vector fptrunc/fpext conversions.
558 {ISD::FP_ROUND, MVT::v2f64, 2},
559 {ISD::FP_EXTEND, MVT::v2f32, 2},
560 {ISD::FP_EXTEND, MVT::v4f32, 4}};
561
562 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
563 if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
564 return AdjustCost(LT.first * Entry->Cost);
565 }
566
567 // Some arithmetic, load and store operations have specific instructions
568 // to cast up/down their types automatically at no extra cost.
569 // TODO: Get these tables to know at least what the related operations are.
570 static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
571 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
572 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
573 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
574 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
575 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
576 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
577
578 // The number of vmovl instructions for the extension.
579 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
580 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
581 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
582 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
583 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
584 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 3 },
585 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
586 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
587 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
588 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
589 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
590 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
591 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
592 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
593 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
594 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
595 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
596 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
597
598 // Operations that we legalize using splitting.
599 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
600 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
601
602 // Vector float <-> i32 conversions.
603 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
604 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
605
606 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
607 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 },
608 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
609 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2 },
610 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
611 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
612 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
613 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
614 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
615 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
616 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
617 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
618 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
619 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
620 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
621 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
622 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
623 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 8 },
624 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
625 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 4 },
626
627 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
628 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
629 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 },
630 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 },
631 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
632 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
633
634 // Vector double <-> i32 conversions.
635 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
636 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
637
638 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
639 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 },
640 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
641 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 3 },
642 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
643 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
644
645 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
646 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
647 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 4 },
648 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 4 },
649 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 8 },
650 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 8 }
651 };
652
653 if (SrcTy.isVector() && ST->hasNEON()) {
654 if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
655 DstTy.getSimpleVT(),
656 SrcTy.getSimpleVT()))
657 return AdjustCost(Entry->Cost);
658 }
659
660 // Scalar float to integer conversions.
661 static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
662 { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
663 { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
664 { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
665 { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
666 { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
667 { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
668 { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
669 { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
670 { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
671 { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
672 { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
673 { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
674 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
675 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
676 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
677 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
678 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
679 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
680 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
681 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
682 };
683 if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
684 if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
685 DstTy.getSimpleVT(),
686 SrcTy.getSimpleVT()))
687 return AdjustCost(Entry->Cost);
688 }
689
690 // Scalar integer to float conversions.
691 static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
692 { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
693 { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
694 { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
695 { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
696 { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
697 { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
698 { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
699 { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
700 { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
701 { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
702 { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
703 { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
704 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
705 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
706 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
707 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
708 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
709 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
710 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
711 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
712 };
713
714 if (SrcTy.isInteger() && ST->hasNEON()) {
715 if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
716 ISD, DstTy.getSimpleVT(),
717 SrcTy.getSimpleVT()))
718 return AdjustCost(Entry->Cost);
719 }
720
721 // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
722 // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
723 // are linearised, so they cost more.
724 static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
725 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
726 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
727 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
728 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
729 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
730 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
731 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
732 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
733 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
734 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
735 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
736 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
737 };
738
739 if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
740 if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
741 ISD, DstTy.getSimpleVT(),
742 SrcTy.getSimpleVT()))
743 return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
744 }
745
746 if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
747 // As a general rule, fp converts that were not matched above are scalarized
748 // and cost 1 vcvt for each lane, so long as the instruction is available.
749 // If not, it will become a series of function calls.
750 const InstructionCost CallCost =
751 getCallInstrCost(nullptr, Dst, {Src}, CostKind);
752 int Lanes = 1;
753 if (SrcTy.isFixedLengthVector())
754 Lanes = SrcTy.getVectorNumElements();
755
756 if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
757 return Lanes;
758 else
759 return Lanes * CallCost;
760 }
761
762 if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
763 SrcTy.isFixedLengthVector()) {
764 // Treat a truncate with larger than legal source (128bits for MVE) as
765 // expensive, 2 instructions per lane.
766 if ((SrcTy.getScalarType() == MVT::i8 ||
767 SrcTy.getScalarType() == MVT::i16 ||
768 SrcTy.getScalarType() == MVT::i32) &&
769 SrcTy.getSizeInBits() > 128 &&
770 SrcTy.getSizeInBits() > DstTy.getSizeInBits())
771 return SrcTy.getVectorNumElements() * 2;
772 }
773
774 // Scalar integer conversion costs.
775 static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
776 // i16 -> i64 requires two dependent operations.
777 { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
778
779 // Truncates on i64 are assumed to be free.
780 { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
781 { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
782 { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
783 { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
784 };
785
786 if (SrcTy.isInteger()) {
787 if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
788 DstTy.getSimpleVT(),
789 SrcTy.getSimpleVT()))
790 return AdjustCost(Entry->Cost);
791 }
792
793 int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
794 ? ST->getMVEVectorCostFactor(CostKind)
795 : 1;
796 return AdjustCost(
797 BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
798 }
799
800 InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
801 unsigned Index) {
802 // Penalize inserting into a D-subregister. We end up with a three times
803 // lower estimated throughput on Swift.
804 if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
805 ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
806 return 3;
807
808 if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
809 Opcode == Instruction::ExtractElement)) {
810 // Cross-class copies are expensive on many microarchitectures,
811 // so assume they are expensive by default.
812 if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
813 return 3;
814
815 // Even if it's not a cross-class copy, this likely leads to mixing
816 // of NEON and VFP code and should therefore be penalized.
817 if (ValTy->isVectorTy() &&
818 ValTy->getScalarSizeInBits() <= 32)
819 return std::max<InstructionCost>(
820 BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
821 }
822
823 if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
824 Opcode == Instruction::ExtractElement)) {
825 // Integer cross-lane moves are more expensive than float, which can
826 // sometimes just be vmovs. Integer moves involve being passed to GPR
827 // registers, causing more of a delay.
828 std::pair<InstructionCost, MVT> LT =
829 getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
830 return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
831 }
832
833 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
834 }
835
836 InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
837 Type *CondTy,
838 CmpInst::Predicate VecPred,
839 TTI::TargetCostKind CostKind,
840 const Instruction *I) {
841 int ISD = TLI->InstructionOpcodeToISD(Opcode);
842
843 // Thumb scalar code size cost for select.
844 if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
845 ST->isThumb() && !ValTy->isVectorTy()) {
846 // Assume expensive structs.
847 if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
848 return TTI::TCC_Expensive;
849
850 // Select costs can vary because they:
851 // - may require one or more conditional mov (including an IT),
852 // - can't operate directly on immediates,
853 // - require live flags, which we can't copy around easily.
854 InstructionCost Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
855
856 // Possible IT instruction for Thumb2, or more for Thumb1.
857 ++Cost;
858
859 // i1 values may need rematerialising by using mov immediates and/or
860 // flag setting instructions.
861 if (ValTy->isIntegerTy(1))
862 ++Cost;
863
864 return Cost;
865 }
866
867 // If this is a vector min/max/abs, use the cost of that intrinsic directly
868 // instead. Hopefully when min/max intrinsics are more prevalent this code
869 // will not be needed.
870 const Instruction *Sel = I;
871 if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
872 Sel->hasOneUse())
873 Sel = cast<Instruction>(Sel->user_back());
874 if (Sel && ValTy->isVectorTy() &&
875 (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
876 const Value *LHS, *RHS;
877 SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
878 unsigned IID = 0;
879 switch (SPF) {
880 case SPF_ABS:
881 IID = Intrinsic::abs;
882 break;
883 case SPF_SMIN:
884 IID = Intrinsic::smin;
885 break;
886 case SPF_SMAX:
887 IID = Intrinsic::smax;
888 break;
889 case SPF_UMIN:
890 IID = Intrinsic::umin;
891 break;
892 case SPF_UMAX:
893 IID = Intrinsic::umax;
894 break;
895 case SPF_FMINNUM:
896 IID = Intrinsic::minnum;
897 break;
898 case SPF_FMAXNUM:
899 IID = Intrinsic::maxnum;
900 break;
901 default:
902 break;
903 }
904 if (IID) {
905 // The ICmp is free; the select gets the cost of the min/max/etc.
906 if (Sel != I)
907 return 0;
908 IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
909 return getIntrinsicInstrCost(CostAttrs, CostKind);
910 }
911 }
912
913 // On NEON a vector select gets lowered to vbsl.
914 if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
915 // Lowering of some vector selects is currently far from perfect.
916 static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
917 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
918 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
919 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
920 };
921
922 EVT SelCondTy = TLI->getValueType(DL, CondTy);
923 EVT SelValTy = TLI->getValueType(DL, ValTy);
924 if (SelCondTy.isSimple() && SelValTy.isSimple()) {
925 if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
926 SelCondTy.getSimpleVT(),
927 SelValTy.getSimpleVT()))
928 return Entry->Cost;
929 }
930
931 std::pair<InstructionCost, MVT> LT =
932 TLI->getTypeLegalizationCost(DL, ValTy);
933 return LT.first;
934 }
935
936 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
937 (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
938 cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
939 FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
940 FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
941 if (!VecCondTy)
942 VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
943
944 // If we don't have mve.fp, any fp operations will need to be scalarized.
945 if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
946 // One scalarization insert, one scalarization extract and the cost of the
947 // fcmps.
948 return BaseT::getScalarizationOverhead(VecValTy, false, true) +
949 BaseT::getScalarizationOverhead(VecCondTy, true, false) +
950 VecValTy->getNumElements() *
951 getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
952 VecCondTy->getScalarType(), VecPred, CostKind,
953 I);
954 }
955
956 std::pair<InstructionCost, MVT> LT =
957 TLI->getTypeLegalizationCost(DL, ValTy);
958 int BaseCost = ST->getMVEVectorCostFactor(CostKind);
959 // There are two types - the input that specifies the type of the compare
960 // and the output vXi1 type. Because we don't know how the output will be
961 // split, we may need an expensive shuffle to get two in sync. This has the
962 // effect of making larger than legal compares (v8i32 for example)
963 // expensive.
964 if (LT.second.getVectorNumElements() > 2) {
965 if (LT.first > 1)
966 return LT.first * BaseCost +
967 BaseT::getScalarizationOverhead(VecCondTy, true, false);
968 return BaseCost;
969 }
970 }
971
972 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
973 // for "multiple beats" potentially needed by MVE instructions.
974 int BaseCost = 1;
975 if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
976 BaseCost = ST->getMVEVectorCostFactor(CostKind);
977
978 return BaseCost *
979 BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
980 }
981
982 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
983 ScalarEvolution *SE,
984 const SCEV *Ptr) {
985 // Address computations in vectorized code with non-consecutive addresses will
986 // likely result in more instructions compared to scalar code where the
987 // computation can more often be merged into the index mode. The resulting
988 // extra micro-ops can significantly decrease throughput.
989 unsigned NumVectorInstToHideOverhead = 10;
990 int MaxMergeDistance = 64;
991
992 if (ST->hasNEON()) {
993 if (Ty->isVectorTy() && SE &&
994 !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
995 return NumVectorInstToHideOverhead;
996
997 // In many cases the address computation is not merged into the instruction
998 // addressing mode.
999 return 1;
1000 }
1001 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1002 }
1003
1004 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1005 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1006 // If a VCTP is part of a chain, it's already profitable and shouldn't be
1007 // optimized, else LSR may block tail-predication.
1008 switch (II->getIntrinsicID()) {
1009 case Intrinsic::arm_mve_vctp8:
1010 case Intrinsic::arm_mve_vctp16:
1011 case Intrinsic::arm_mve_vctp32:
1012 case Intrinsic::arm_mve_vctp64:
1013 return true;
1014 default:
1015 break;
1016 }
1017 }
1018 return false;
1019 }
1020
1021 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1022 if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1023 return false;
1024
1025 if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1026 // Don't support v2i1 yet.
1027 if (VecTy->getNumElements() == 2)
1028 return false;
1029
1030 // We don't support extending fp types.
1031 unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1032 if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1033 return false;
1034 }
1035
1036 unsigned EltWidth = DataTy->getScalarSizeInBits();
1037 return (EltWidth == 32 && Alignment >= 4) ||
1038 (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1039 }
1040
1041 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1042 if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1043 return false;
1044
1045 // This method is called in 2 places:
1046 // - from the vectorizer with a scalar type, in which case we need to get
1047 // this as good as we can with the limited info we have (and rely on the cost
1048 // model for the rest).
1049 // - from the masked intrinsic lowering pass with the actual vector type.
1050 // For MVE, we have a custom lowering pass that will already have custom
1051 // legalised any gathers that we can to MVE intrinsics, and want to expand all
1052 // the rest. The pass runs before the masked intrinsic lowering pass, so if we
1053 // are here, we know we want to expand.
1054 if (isa<VectorType>(Ty))
1055 return false;
1056
1057 unsigned EltWidth = Ty->getScalarSizeInBits();
1058 return ((EltWidth == 32 && Alignment >= 4) ||
1059 (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1060 }
1061
1062 /// Given a memcpy/memset/memmove instruction, return the number of memory
1063 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1064 /// call is used.
1065 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1066 MemOp MOp;
1067 unsigned DstAddrSpace = ~0u;
1068 unsigned SrcAddrSpace = ~0u;
1069 const Function *F = I->getParent()->getParent();
1070
1071 if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1072 ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1073 // If 'size' is not a constant, a library call will be generated.
1074 if (!C)
1075 return -1;
1076
1077 const unsigned Size = C->getValue().getZExtValue();
1078 const Align DstAlign = *MC->getDestAlign();
1079 const Align SrcAlign = *MC->getSourceAlign();
1080
1081 MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1082 /*IsVolatile*/ false);
1083 DstAddrSpace = MC->getDestAddressSpace();
1084 SrcAddrSpace = MC->getSourceAddressSpace();
1085 }
1086 else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1087 ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1088 // If 'size' is not a constant, a library call will be generated.
1089 if (!C)
1090 return -1;
1091
1092 const unsigned Size = C->getValue().getZExtValue();
1093 const Align DstAlign = *MS->getDestAlign();
1094
1095 MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1096 /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1097 DstAddrSpace = MS->getDestAddressSpace();
1098 }
1099 else
1100 llvm_unreachable("Expected a memcpy/move or memset!");
1101
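// Each generated mem op is a load plus a store for memcpy/memmove, but only
// a store for memset, hence the different cost factor below.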
1102 unsigned Limit, Factor = 2;
1103 switch(I->getIntrinsicID()) {
1104 case Intrinsic::memcpy:
1105 Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1106 break;
1107 case Intrinsic::memmove:
1108 Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1109 break;
1110 case Intrinsic::memset:
1111 Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1112 Factor = 1;
1113 break;
1114 default:
1115 llvm_unreachable("Expected a memcpy/move or memset!");
1116 }
1117
1118 // MemOps will be populated with a list of data types that need to be
1119 // loaded and stored. That's why we multiply the number of elements by 2 to
1120 // get the cost for this memcpy.
1121 std::vector<EVT> MemOps;
1122 if (getTLI()->findOptimalMemOpLowering(
1123 MemOps, Limit, MOp, DstAddrSpace,
1124 SrcAddrSpace, F->getAttributes()))
1125 return MemOps.size() * Factor;
1126
1127 // If we can't find an optimal memop lowering, return the default cost
1128 return -1;
1129 }
1130
1131 InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1132 int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1133
1134 // To model the cost of a library call, we assume 1 for the call, and
1135 // 3 for the argument setup.
1136 if (NumOps == -1)
1137 return 4;
1138 return NumOps;
1139 }
1140
1141 InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1142 VectorType *Tp, ArrayRef<int> Mask,
1143 int Index, VectorType *SubTp) {
1144 Kind = improveShuffleKindFromMask(Kind, Mask);
1145 if (ST->hasNEON()) {
1146 if (Kind == TTI::SK_Broadcast) {
1147 static const CostTblEntry NEONDupTbl[] = {
1148 // VDUP handles these cases.
1149 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1150 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1151 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1152 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1153 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1154 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1155
1156 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1157 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1158 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1159 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1160
1161 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1162 if (const auto *Entry =
1163 CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1164 return LT.first * Entry->Cost;
1165 }
1166 if (Kind == TTI::SK_Reverse) {
1167 static const CostTblEntry NEONShuffleTbl[] = {
1168 // Reverse shuffle costs one instruction if we are shuffling within a
1169 // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1170 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1171 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1172 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1173 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1174 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1175 {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1176
1177 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1178 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1179 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1180 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1181
1182 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1183 if (const auto *Entry =
1184 CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1185 return LT.first * Entry->Cost;
1186 }
1187 if (Kind == TTI::SK_Select) {
1188 static const CostTblEntry NEONSelShuffleTbl[] = {
1189 // Select shuffle cost table for ARM. Cost is the number of
1190 // instructions
1191 // required to create the shuffled vector.
1192
1193 {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1194 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1195 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1196 {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1197
1198 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1199 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1200 {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1201
1202 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1203
1204 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1205
1206 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1207 if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1208 ISD::VECTOR_SHUFFLE, LT.second))
1209 return LT.first * Entry->Cost;
1210 }
1211 }
1212 if (ST->hasMVEIntegerOps()) {
1213 if (Kind == TTI::SK_Broadcast) {
1214 static const CostTblEntry MVEDupTbl[] = {
1215 // VDUP handles these cases.
1216 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1217 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1218 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1219 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1220 {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1221
1222 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1223 if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1224 LT.second))
1225 return LT.first * Entry->Cost *
1226 ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1227 }
1228
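// Shuffle masks that match a VREV16/VREV32/VREV64 pattern cost a single
// (beat-adjusted) MVE instruction.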
1229 if (!Mask.empty()) {
1230 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1231 if (Mask.size() <= LT.second.getVectorNumElements() &&
1232 (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1233 isVREVMask(Mask, LT.second, 64)))
1234 return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1235 }
1236 }
1237
1238 int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1239 ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1240 : 1;
1241 return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
1242 }
1243
1244 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1245 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1246 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
1247 TTI::OperandValueProperties Opd1PropInfo,
1248 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1249 const Instruction *CxtI) {
1250 int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1251 if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1252 // Make operations on i1 relatively expensive as this often involves
1253 // combining predicates. AND and XOR should be easier to handle with IT
1254 // blocks.
1255 switch (ISDOpcode) {
1256 default:
1257 break;
1258 case ISD::AND:
1259 case ISD::XOR:
1260 return 2;
1261 case ISD::OR:
1262 return 3;
1263 }
1264 }
1265
1266 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1267
1268 if (ST->hasNEON()) {
1269 const unsigned FunctionCallDivCost = 20;
1270 const unsigned ReciprocalDivCost = 10;
1271 static const CostTblEntry CostTbl[] = {
1272 // Division.
1273 // These costs are somewhat random. Choose a cost of 20 to indicate that
1274 // vectorizing division (added function call) is going to be very expensive.
1275 // Double registers types.
1276 { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1277 { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1278 { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1279 { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1280 { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1281 { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1282 { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1283 { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1284 { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
1285 { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
1286 { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1287 { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1288 { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
1289 { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
1290 { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
1291 { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
1292 // Quad register types.
1293 { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1294 { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1295 { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1296 { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1297 { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1298 { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1299 { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1300 { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1301 { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1302 { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1303 { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1304 { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1305 { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1306 { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1307 { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1308 { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1309 // Multiplication.
1310 };
1311
1312 if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1313 return LT.first * Entry->Cost;
1314
1315 InstructionCost Cost = BaseT::getArithmeticInstrCost(
1316 Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
1317
1318 // This is somewhat of a hack. The problem that we are facing is that SROA
1319 // creates a sequence of shift, and, or instructions to construct values.
1320 // These sequences are recognized by the ISel and have zero-cost. Not so for
1321 // the vectorized code. Because we have support for v2i64 but not i64 those
1322 // sequences look particularly beneficial to vectorize.
1323 // To work around this we increase the cost of v2i64 operations to make them
1324 // seem less beneficial.
1325 if (LT.second == MVT::v2i64 &&
1326 Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1327 Cost += 4;
1328
1329 return Cost;
1330 }
1331
1332 // If this operation is a shift on arm/thumb2, it might well be folded into
1333 // the following instruction, hence having a cost of 0.
1334 auto LooksLikeAFreeShift = [&]() {
1335 if (ST->isThumb1Only() || Ty->isVectorTy())
1336 return false;
1337
1338 if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1339 return false;
1340 if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1341 return false;
1342
1343 // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB.
1344 switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1345 case Instruction::Add:
1346 case Instruction::Sub:
1347 case Instruction::And:
1348 case Instruction::Xor:
1349 case Instruction::Or:
1350 case Instruction::ICmp:
1351 return true;
1352 default:
1353 return false;
1354 }
1355 };
1356 if (LooksLikeAFreeShift())
1357 return 0;
1358
1359 // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1360 // for "multiple beats" potentially needed by MVE instructions.
1361 int BaseCost = 1;
1362 if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1363 BaseCost = ST->getMVEVectorCostFactor(CostKind);
1364
1365 // The rest of this mostly follows what is done in BaseT::getArithmeticInstrCost,
1366 // without treating floats as more expensive than scalars or increasing the
1367 // costs for custom operations. The result is also multiplied by the
1368 // MVEVectorCostFactor where appropriate.
1369 if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1370 return LT.first * BaseCost;
1371
1372 // Else this is an Expand operation; assume that we need to scalarize this op.
1373 if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1374 unsigned Num = VTy->getNumElements();
1375 InstructionCost Cost =
1376 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
1377 // Return the cost of multiple scalar invocations plus the cost of
1378 // inserting and extracting the values.
1379 SmallVector<Type *> Tys(Args.size(), Ty);
1380 return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
1381 }
1382
1383 return BaseCost;
1384 }
1385
1386 InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1387 MaybeAlign Alignment,
1388 unsigned AddressSpace,
1389 TTI::TargetCostKind CostKind,
1390 const Instruction *I) {
1391 // TODO: Handle other cost kinds.
1392 if (CostKind != TTI::TCK_RecipThroughput)
1393 return 1;
1394
1395 // Type legalization can't handle structs
1396 if (TLI->getValueType(DL, Src, true) == MVT::Other)
1397 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1398 CostKind);
1399
1400 if (ST->hasNEON() && Src->isVectorTy() &&
1401 (Alignment && *Alignment != Align(16)) &&
1402 cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1403 // Unaligned loads/stores are extremely inefficient.
1404 // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1405 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1406 return LT.first * 4;
1407 }
1408
1409 // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1410 // Same for stores.
  if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
      ((Opcode == Instruction::Load && I->hasOneUse() &&
        isa<FPExtInst>(*I->user_begin())) ||
       (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
    FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
    Type *DstTy =
        Opcode == Instruction::Load
            ? (*I->user_begin())->getType()
            : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
    if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
        DstTy->getScalarType()->isFloatTy())
      return ST->getMVEVectorCostFactor(CostKind);
  }

  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor(CostKind)
                     : 1;
  return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                           CostKind, I);
}

InstructionCost
ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  if (ST->hasMVEIntegerOps()) {
    if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
      return ST->getMVEVectorCostFactor(CostKind);
    if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
      return ST->getMVEVectorCostFactor(CostKind);
  }
  if (!isa<FixedVectorType>(Src))
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);
  // Scalar cost, which is currently very high due to the inefficiency of the
  // generated code.
  return cast<FixedVectorType>(Src)->getNumElements() * 8;
}

InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types that are 64 or 128 bits in
    // size. Accesses having vector types that are a multiple of 128 bits can
    // be matched to more than one vldN/vstN instruction.
    int BaseCost =
        ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
      return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);

    // Some smaller-than-legal interleaved patterns are cheap, as we can make
    // use of the vmovn or vrev patterns to interleave a standard load. This is
    // true for v4i8, v8i8 and v4i16 at least (but not for v4f16, as it is
    // promoted differently). The cost of 2 here is then a load and a vrev or
    // vmovn.
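    // E.g. (illustrative) deinterleaving a v8i8 access with Factor == 2 can be
    // done as one load plus one vrev/vmovn-style rearrangement, hence the
    // 2 * BaseCost returned below.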
    if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
        VecTy->isIntOrIntVectorTy() &&
        DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
      return 2 * BaseCost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

InstructionCost ARMTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  using namespace PatternMatch;
  if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalars!");
  auto *VTy = cast<FixedVectorType>(DataTy);

  // TODO: Splitting, once we do that.

  unsigned NumElems = VTy->getNumElements();
  unsigned EltSize = VTy->getScalarSizeInBits();
  std::pair<InstructionCost, MVT> LT =
      TLI->getTypeLegalizationCost(DL, DataTy);

  // For now, it is assumed that for the MVE gather instructions the loads are
  // all effectively serialised. This means the cost is the scalar cost
  // multiplied by the number of elements being loaded. This is possibly very
  // conservative, but even so we still end up vectorising loops because the
  // cost per iteration for many loops is lower than for scalar loops.
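  // E.g. (illustrative numbers) a v4i32 gather with LT.first == 1 and a cost
  // factor of 2 gets VectorCost = 4 * 1 * 2 = 8.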
  InstructionCost VectorCost =
      NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
  // The scalarization cost should be a lot higher. We use the number of vector
  // elements plus the scalarization overhead.
  InstructionCost ScalarCost =
      NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) +
      BaseT::getScalarizationOverhead(VTy, false, true);

  if (EltSize < 8 || Alignment < EltSize / 8)
    return ScalarCost;

  unsigned ExtSize = EltSize;
  // Check whether there's a single user that asks for an extended type.
  if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic.
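    // E.g. (illustrative) a gather of <4 x i8> whose only user is
    //   %z = zext <4 x i8> %g to <4 x i32>
    // sets ExtSize to 32, since TypeSize (32) * NumElems (4) == 128.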
    if ((I->getOpcode() == Instruction::Load ||
         match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
        I->hasOneUse()) {
      const User *Us = *I->users().begin();
      if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // Only allow valid type combinations.
        unsigned TypeSize =
            cast<Instruction>(Us)->getType()->getScalarSizeInBits();
        if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
             (TypeSize == 16 && EltSize == 8)) &&
            TypeSize * NumElems == 128) {
          ExtSize = TypeSize;
        }
      }
    }
    // Check whether the input data needs to be truncated.
    TruncInst *T;
    if ((I->getOpcode() == Instruction::Store ||
         match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
        (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
      // Only allow valid type combinations.
      unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
      if (((EltSize == 16 && TypeSize == 32) ||
           (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
          TypeSize * NumElems == 128)
        ExtSize = TypeSize;
    }
  }

  if (ExtSize * NumElems != 128 || NumElems < 4)
    return ScalarCost;

  // Any (aligned) i32 gather will not need to be scalarised.
  if (ExtSize == 32)
    return VectorCost;
  // For smaller types, we need to ensure that the gep's inputs are correctly
  // extended from a small enough value. Other sizes (including i64) are
  // scalarized for now.
  if (ExtSize != 8 && ExtSize != 16)
    return ScalarCost;

  if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
    Ptr = BC->getOperand(0);
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (GEP->getNumOperands() != 2)
      return ScalarCost;
    unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
    // Scale needs to be correct (which is only relevant for i16s).
    if (Scale != 1 && Scale * 8 != ExtSize)
      return ScalarCost;
    // And we need to zext (not sext) the indexes from a small enough type.
    if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
      if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
        return VectorCost;
    }
    return ScalarCost;
  }
  return ScalarCost;
}

InstructionCost
ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                       bool IsPairwiseForm,
                                       TTI::TargetCostKind CostKind) {
  EVT ValVT = TLI->getValueType(DL, ValTy);
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                             CostKind);

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  static const CostTblEntry CostTblAdd[]{
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };
  if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
    return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                           CostKind);
}

InstructionCost
ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
                                        Type *ResTy, VectorType *ValTy,
                                        TTI::TargetCostKind CostKind) {
  EVT ValVT = TLI->getValueType(DL, ValTy);
  EVT ResVT = TLI->getValueType(DL, ResTy);
  if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
    std::pair<InstructionCost, MVT> LT =
        TLI->getTypeLegalizationCost(DL, ValTy);
    if ((LT.second == MVT::v16i8 && ResVT.getSizeInBits() <= 32) ||
        (LT.second == MVT::v8i16 &&
         ResVT.getSizeInBits() <= (IsMLA ? 64 : 32)) ||
        (LT.second == MVT::v4i32 && ResVT.getSizeInBits() <= 64))
      return ST->getMVEVectorCostFactor(CostKind) * LT.first;
  }

  return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
                                            CostKind);
}

InstructionCost
ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
  switch (ICA.getID()) {
  case Intrinsic::get_active_lane_mask:
    // Currently we make a somewhat optimistic assumption that
    // active_lane_masks are always free. In reality they may be freely folded
    // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
    // of add/icmp code. We may need to improve this in the future, but being
    // able to detect if it is free or not involves looking at a lot of other
    // code. We currently assume that the vectorizer inserted these, and knew
    // what it was doing in adding one.
    if (ST->hasMVEIntegerOps())
      return 0;
    break;
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8) {
      // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
      // need to extend the type, as it uses shr(qadd(shl, shl)).
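      // E.g. (illustrative) an <8 x i8> saturating add is promoted to v8i16,
      // so it is costed as 4 instructions: shl of both inputs, vqadd, then
      // shr of the result.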
      unsigned Instrs =
          LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
      return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
    }
    break;
  }
  case Intrinsic::abs:
  case Intrinsic::smin:
  case Intrinsic::smax:
  case Intrinsic::umin:
  case Intrinsic::umax: {
    if (!ST->hasMVEIntegerOps())
      break;
    Type *VT = ICA.getReturnType();

    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
        LT.second == MVT::v16i8)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    if (!ST->hasMVEFloatOps())
      break;
    Type *VT = ICA.getReturnType();
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
    if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
      return LT.first * ST->getMVEVectorCostFactor(CostKind);
    break;
  }
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);

  // Assume all Arm-specific intrinsics map to an instruction.
  if (F->getName().startswith("llvm.arm"))
    return false;

  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::powi:
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::pow:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::exp:
  case Intrinsic::exp2:
    return true;
  case Intrinsic::sqrt:
  case Intrinsic::fabs:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::canonicalize:
  case Intrinsic::lround:
  case Intrinsic::llround:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
      return true;
    if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
      return true;
    // Some operations can be handled by vector instructions; assume
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO: Handle scalar operations properly.
    return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
  case Intrinsic::masked_store:
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter:
    return !ST->hasMVEIntegerOps();
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::sadd_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::usub_sat:
    return false;
  }

  return BaseT::isLoweredToCall(F);
}

bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
  unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
  EVT VT = TLI->getValueType(DL, I.getType(), true);
  if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
    return true;

  // Check if an intrinsic will be lowered to a call and assume that any
  // other CallInst will generate a bl.
  if (auto *Call = dyn_cast<CallInst>(&I)) {
    if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::memcpy:
      case Intrinsic::memset:
      case Intrinsic::memmove:
        return getNumMemOps(II) == -1;
      default:
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
    }
    return true;
  }

  // FPv5 provides conversions between integer, double-precision,
  // single-precision, and half-precision formats.
  switch (I.getOpcode()) {
  default:
    break;
  case Instruction::FPToSI:
  case Instruction::FPToUI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    return !ST->hasFPARMv8Base();
  }

  // FIXME: Unfortunately the approach of checking the Operation Action does
  // not catch all cases of Legalization that use library calls. Our
  // Legalization step categorizes some transformations into library calls as
  // Custom, Expand or even Legal when doing type legalization. So for now
  // we have to special-case, for instance, the SDIV of 64-bit integers and
  // the use of floating point emulation.
  if (VT.isInteger() && VT.getSizeInBits() >= 64) {
    switch (ISD) {
    default:
      break;
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    case ISD::SDIVREM:
    case ISD::UDIVREM:
      return true;
    }
  }

  // Assume all other non-float operations are supported.
  if (!VT.isFloatingPoint())
    return false;

  // We'll need a library call to handle most floats when using soft float.
  if (TLI->useSoftFloat()) {
    switch (I.getOpcode()) {
    default:
      return true;
    case Instruction::Alloca:
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::Select:
    case Instruction::PHI:
      return false;
    }
  }

  // We'll need a libcall to perform double-precision operations on a
  // single-precision-only FPU.
  if (I.getType()->isDoubleTy() && !ST->hasFP64())
    return true;

  // Likewise for half-precision arithmetic.
  if (I.getType()->isHalfTy() && !ST->hasFullFP16())
    return true;

  return false;
}

bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
    return false;
  }

  if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
    return false;
  }

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
    return false;
  }

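  // The trip count is the backedge-taken count plus one; e.g. a loop whose
  // backedge is taken 9 times executes 10 iterations.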
  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
    return false;
  }

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.

  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::start_loop_iterations:
      case Intrinsic::test_start_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there are any that we know will turn into
  // a call, or if this loop is already a low-overhead loop or will become a
  // tail-predicated loop.
  bool IsTailPredLoop = false;
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
            isa<InlineAsm>(I)) {
          LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
          return false;
        }
        if (auto *II = dyn_cast<IntrinsicInst>(&I))
          IsTailPredLoop |=
              II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
              II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto *Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // We don't allow icmps, and because we only look at single-block loops,
  // we simply count the icmps, i.e. there should only be 1 for the backedge.
  if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
    return false;

  if (isa<FCmpInst>(&I))
    return false;

  // We could allow extending/narrowing FP loads/stores, but codegen is
  // too inefficient so reject this for now.
  if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
    return false;

  // Extends have to be extending-loads.
  if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
    if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
      return false;

  // Truncs have to be narrowing-stores.
  if (isa<TruncInst>(&I))
    if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
      return false;

  return true;
}

// To set up a tail-predicated loop, we need to know the total number of
// elements processed by that loop. Thus, we need to determine the element
// size and:
// 1) it should be uniform for all operations in the vector loop, so we
//    e.g. don't want any widening/narrowing operations.
// 2) it should be smaller than i64s because we don't have vector operations
//    that work on i64s.
// 3) we don't want elements to be reversed or shuffled, to make sure the
//    tail-predication masks/predicates the right lanes.
//
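// For example (illustrative), a zext whose operand is not a load is rejected
// by canTailPredicateInstruction above, since it would mix element sizes
// within the vector loop; a zext folded into an extending load is fine.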
static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                 const DataLayout &DL,
                                 const LoopAccessInfo *LAI) {
  LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");

  // If there are live-out values, it is probably a reduction. We can predicate
  // most reduction operations freely under MVE using a combination of
  // prefer-predicated-reduction-select and inloop reductions. We limit this to
  // floating point and integer reductions, but don't check for operators
  // specifically here. If the value ends up not being a reduction (and so the
  // vectorizer cannot tail-fold the loop), we should fall back to standard
  // vectorization automatically.
  SmallVector<Instruction *, 8> LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
  bool ReductionsDisabled =
      EnableTailPredication == TailPredication::EnabledNoReductions ||
      EnableTailPredication == TailPredication::ForceEnabledNoReductions;

  for (auto *I : LiveOuts) {
    if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
        !I->getType()->isHalfTy()) {
      LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
                           "live-out value\n");
      return false;
    }
    if (ReductionsDisabled) {
      LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
      return false;
    }
  }

  // Next, check that all instructions can be tail-predicated.
  PredicatedScalarEvolution PSE = LAI->getPSE();
  SmallVector<Instruction *, 16> LoadStores;
  int ICmpCount = 0;

  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : BB->instructionsWithoutDebug()) {
      if (isa<PHINode>(&I))
        continue;
      if (!canTailPredicateInstruction(I, ICmpCount)) {
        LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
        return false;
      }

      Type *T = I.getType();
      if (T->isPointerTy())
        T = T->getPointerElementType();

      if (T->getScalarSizeInBits() > 32) {
        LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
        return false;
      }
      if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
        Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
        int64_t NextStride = getPtrStride(PSE, Ptr, L);
        if (NextStride == 1) {
          // TODO: for now only allow consecutive strides of 1. We could
          // support other strides as long as they are uniform, but let's keep
          // it simple for now.
          continue;
        } else if (NextStride == -1 ||
                   (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
                   (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
          LLVM_DEBUG(dbgs()
                     << "Reversed or interleaving stride found; vldN/vstN "
                        "accesses can't be tail-predicated.\n");
          return false;
          // TODO: don't tail predicate if there is a reversed load?
        } else if (EnableMaskedGatherScatters) {
          // Gathers/scatters do allow loading from arbitrary strides, at
          // least if they are loop invariant.
          // TODO: Loop variant strides should in theory work, too, but
          // this requires further testing.
          const SCEV *PtrScev =
              replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
          if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
            const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
            if (PSE.getSE()->isLoopInvariant(Step, L))
              continue;
          }
        }
        LLVM_DEBUG(dbgs() << "Bad stride found, can't tail-predicate.\n");
        return false;
      }
    }
  }

  LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
  return true;
}

bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
                                             ScalarEvolution &SE,
                                             AssumptionCache &AC,
                                             TargetLibraryInfo *TLI,
                                             DominatorTree *DT,
                                             const LoopAccessInfo *LAI) {
  if (!EnableTailPredication) {
    LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
    return false;
  }

  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/store instructions:
  if (!ST->hasMVEIntegerOps())
    return false;

  // For now, restrict this to single-block loops.
  if (L->getNumBlocks() > 1) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
                         "loop.\n");
    return false;
  }

  assert(L->isInnermost() &&
         "preferPredicateOverEpilogue: inner-loop expected");

  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(*LI)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "analyzable.\n");
    return false;
  }

  // This checks if we have the low-overhead branch architecture
  // extension, and if we will create a hardware-loop:
  if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "profitable.\n");
    return false;
  }

  if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "a candidate.\n");
    return false;
  }

  return canTailPredicateLoop(L, LI, SE, DL, LAI);
}

bool ARMTTIImpl::emitGetActiveLaneMask() const {
  if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
    return false;

  // Intrinsic @llvm.get.active.lane.mask is supported.
  // It is used in the MVETailPredication pass, which requires the number of
  // elements processed by this vector loop to set up the tail-predicated
  // loop.
  return true;
}

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Enable upper-bound unrolling universally; it is not dependent on the
  // conditions below.
  UP.UpperBound = true;

  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow one exit other than the latch. This acts as an early exit, as
  // it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Don't unroll vectorized loops, including the remainder loop.
  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  InstructionCost Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loops. MVE does not benefit from it as much
      // as scalar code.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      SmallVector<const Value *, 4> Operands(I.operand_values());
      Cost +=
          getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force-unrolling small loops can be very useful because of the branch-taken
  // cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
  if (!ST->hasMVEIntegerOps())
    return false;

  unsigned ScalarBits = Ty->getScalarSizeInBits();
  switch (Opcode) {
  case Instruction::Add:
    return ScalarBits <= 64;
  default:
    return false;
  }
}

bool ARMTTIImpl::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
  return ST->hasMVEIntegerOps();
}
