1 //===- AMDGPUTargetTransformInfo.cpp - AMDGPU specific TTI pass -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements a TargetTransformInfo analysis pass specific to the
11 // AMDGPU target machine. It uses the target's detailed information to provide
12 // more precise answers to certain TTI queries, while letting the target
13 // independent and default TTI implementations handle the rest.
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "AMDGPUTargetTransformInfo.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "llvm/Analysis/LoopInfo.h"
20 #include "llvm/Analysis/ValueTracking.h"
21 #include "llvm/IR/IntrinsicsAMDGPU.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/Support/KnownBits.h"
24
25 using namespace llvm;
26
27 #define DEBUG_TYPE "AMDGPUtti"
28
29 static cl::opt<unsigned> UnrollThresholdPrivate(
30 "amdgpu-unroll-threshold-private",
31 cl::desc("Unroll threshold for AMDGPU if private memory used in a loop"),
32 cl::init(2700), cl::Hidden);
33
34 static cl::opt<unsigned> UnrollThresholdLocal(
35 "amdgpu-unroll-threshold-local",
36 cl::desc("Unroll threshold for AMDGPU if local memory used in a loop"),
37 cl::init(1000), cl::Hidden);
38
39 static cl::opt<unsigned> UnrollThresholdIf(
40 "amdgpu-unroll-threshold-if",
41 cl::desc("Unroll threshold increment for AMDGPU for each if statement inside loop"),
42 cl::init(200), cl::Hidden);
43
44 static cl::opt<bool> UnrollRuntimeLocal(
45 "amdgpu-unroll-runtime-local",
46 cl::desc("Allow runtime unroll for AMDGPU if local memory used in a loop"),
47 cl::init(true), cl::Hidden);
48
49 static cl::opt<bool> UseLegacyDA(
50 "amdgpu-use-legacy-divergence-analysis",
51 cl::desc("Enable legacy divergence analysis for AMDGPU"),
52 cl::init(false), cl::Hidden);
53
54 static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
55 "amdgpu-unroll-max-block-to-analyze",
56 cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
57 cl::init(32), cl::Hidden);
58
59 static cl::opt<unsigned> ArgAllocaCost("amdgpu-inline-arg-alloca-cost",
60 cl::Hidden, cl::init(4000),
61 cl::desc("Cost of alloca argument"));
62
// If the amount of scratch memory to eliminate exceeds our ability to allocate
// it into registers, we gain nothing by aggressively inlining functions for
// that heuristic.
66 static cl::opt<unsigned>
67 ArgAllocaCutoff("amdgpu-inline-arg-alloca-cutoff", cl::Hidden,
68 cl::init(256),
69 cl::desc("Maximum alloca size to use for inline cost"));
70
71 // Inliner constraint to achieve reasonable compilation time.
72 static cl::opt<size_t> InlineMaxBB(
73 "amdgpu-inline-max-bb", cl::Hidden, cl::init(1100),
74 cl::desc("Maximum number of BBs allowed in a function after inlining"
75 " (compile time constraint)"));
76
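// Return true if \p Cond depends (transitively, up to a small depth) on a PHI
// node that belongs to loop \p L itself rather than to one of its subloops.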
static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                              unsigned Depth = 0) {
79 const Instruction *I = dyn_cast<Instruction>(Cond);
80 if (!I)
81 return false;
82
83 for (const Value *V : I->operand_values()) {
84 if (!L->contains(I))
85 continue;
86 if (const PHINode *PHI = dyn_cast<PHINode>(V)) {
87 if (llvm::none_of(L->getSubLoops(), [PHI](const Loop* SubLoop) {
88 return SubLoop->contains(PHI); }))
89 return true;
90 } else if (Depth < 10 && dependsOnLocalPhi(L, V, Depth+1))
91 return true;
92 }
93 return false;
94 }
95
AMDGPUTTIImpl::AMDGPUTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
97 : BaseT(TM, F.getParent()->getDataLayout()),
98 TargetTriple(TM->getTargetTriple()),
99 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
100 TLI(ST->getTargetLowering()) {}
101
void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                            TTI::UnrollingPreferences &UP) {
104 const Function &F = *L->getHeader()->getParent();
105 UP.Threshold = AMDGPU::getIntegerAttribute(F, "amdgpu-unroll-threshold", 300);
106 UP.MaxCount = std::numeric_limits<unsigned>::max();
107 UP.Partial = true;
108
  // A conditional branch in a loop back edge needs 3 additional exec
  // manipulations on average.
111 UP.BEInsns += 3;
112
113 // TODO: Do we want runtime unrolling?
114
  // Maximum alloca size that can fit in registers. Reserve 16 registers.
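  // (256 total VGPRs - 16 reserved) * 4 bytes per 32-bit register.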
116 const unsigned MaxAlloca = (256 - 16) * 4;
117 unsigned ThresholdPrivate = UnrollThresholdPrivate;
118 unsigned ThresholdLocal = UnrollThresholdLocal;
119
120 // If this loop has the amdgpu.loop.unroll.threshold metadata we will use the
121 // provided threshold value as the default for Threshold
122 if (MDNode *LoopUnrollThreshold =
123 findOptionMDForLoop(L, "amdgpu.loop.unroll.threshold")) {
124 if (LoopUnrollThreshold->getNumOperands() == 2) {
125 ConstantInt *MetaThresholdValue = mdconst::extract_or_null<ConstantInt>(
126 LoopUnrollThreshold->getOperand(1));
127 if (MetaThresholdValue) {
128 // We will also use the supplied value for PartialThreshold for now.
129 // We may introduce additional metadata if it becomes necessary in the
130 // future.
131 UP.Threshold = MetaThresholdValue->getSExtValue();
132 UP.PartialThreshold = UP.Threshold;
133 ThresholdPrivate = std::min(ThresholdPrivate, UP.Threshold);
134 ThresholdLocal = std::min(ThresholdLocal, UP.Threshold);
135 }
136 }
137 }
138
139 unsigned MaxBoost = std::max(ThresholdPrivate, ThresholdLocal);
140 for (const BasicBlock *BB : L->getBlocks()) {
141 const DataLayout &DL = BB->getModule()->getDataLayout();
142 unsigned LocalGEPsSeen = 0;
143
144 if (llvm::any_of(L->getSubLoops(), [BB](const Loop* SubLoop) {
145 return SubLoop->contains(BB); }))
146 continue; // Block belongs to an inner loop.
147
148 for (const Instruction &I : *BB) {
      // Unroll a loop which contains an "if" statement whose condition is
      // defined by a PHI belonging to the loop. This may help to eliminate
      // the if region and potentially even the PHI itself, saving on both
      // divergence and registers used for the PHI.
      // Add a small bonus for each such "if" statement.
154 if (const BranchInst *Br = dyn_cast<BranchInst>(&I)) {
155 if (UP.Threshold < MaxBoost && Br->isConditional()) {
156 BasicBlock *Succ0 = Br->getSuccessor(0);
157 BasicBlock *Succ1 = Br->getSuccessor(1);
158 if ((L->contains(Succ0) && L->isLoopExiting(Succ0)) ||
159 (L->contains(Succ1) && L->isLoopExiting(Succ1)))
160 continue;
161 if (dependsOnLocalPhi(L, Br->getCondition())) {
162 UP.Threshold += UnrollThresholdIf;
163 LLVM_DEBUG(dbgs() << "Set unroll threshold " << UP.Threshold
164 << " for loop:\n"
165 << *L << " due to " << *Br << '\n');
166 if (UP.Threshold >= MaxBoost)
167 return;
168 }
169 }
170 continue;
171 }
172
173 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I);
174 if (!GEP)
175 continue;
176
177 unsigned AS = GEP->getAddressSpace();
178 unsigned Threshold = 0;
179 if (AS == AMDGPUAS::PRIVATE_ADDRESS)
180 Threshold = ThresholdPrivate;
181 else if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS)
182 Threshold = ThresholdLocal;
183 else
184 continue;
185
186 if (UP.Threshold >= Threshold)
187 continue;
188
189 if (AS == AMDGPUAS::PRIVATE_ADDRESS) {
190 const Value *Ptr = GEP->getPointerOperand();
191 const AllocaInst *Alloca =
192 dyn_cast<AllocaInst>(getUnderlyingObject(Ptr));
193 if (!Alloca || !Alloca->isStaticAlloca())
194 continue;
195 Type *Ty = Alloca->getAllocatedType();
196 unsigned AllocaSize = Ty->isSized() ? DL.getTypeAllocSize(Ty) : 0;
197 if (AllocaSize > MaxAlloca)
198 continue;
199 } else if (AS == AMDGPUAS::LOCAL_ADDRESS ||
200 AS == AMDGPUAS::REGION_ADDRESS) {
201 LocalGEPsSeen++;
        // Inhibit unrolling for local memory if we have seen addressing that
        // is not based on a variable (a global or an argument); we will most
        // likely be unable to combine such accesses.
        // Do not unroll overly deep inner loops for local memory, to leave a
        // chance to unroll an outer loop for a more important reason.
206 if (LocalGEPsSeen > 1 || L->getLoopDepth() > 2 ||
207 (!isa<GlobalVariable>(GEP->getPointerOperand()) &&
208 !isa<Argument>(GEP->getPointerOperand())))
209 continue;
210 LLVM_DEBUG(dbgs() << "Allow unroll runtime for loop:\n"
211 << *L << " due to LDS use.\n");
212 UP.Runtime = UnrollRuntimeLocal;
213 }
214
215 // Check if GEP depends on a value defined by this loop itself.
216 bool HasLoopDef = false;
217 for (const Value *Op : GEP->operands()) {
218 const Instruction *Inst = dyn_cast<Instruction>(Op);
219 if (!Inst || L->isLoopInvariant(Op))
220 continue;
221
222 if (llvm::any_of(L->getSubLoops(), [Inst](const Loop* SubLoop) {
223 return SubLoop->contains(Inst); }))
224 continue;
225 HasLoopDef = true;
226 break;
227 }
228 if (!HasLoopDef)
229 continue;
230
231 // We want to do whatever we can to limit the number of alloca
232 // instructions that make it through to the code generator. allocas
233 // require us to use indirect addressing, which is slow and prone to
234 // compiler bugs. If this loop does an address calculation on an
235 // alloca ptr, then we want to use a higher than normal loop unroll
236 // threshold. This will give SROA a better chance to eliminate these
237 // allocas.
238 //
239 // We also want to have more unrolling for local memory to let ds
240 // instructions with different offsets combine.
241 //
242 // Don't use the maximum allowed value here as it will make some
243 // programs way too big.
244 UP.Threshold = Threshold;
245 LLVM_DEBUG(dbgs() << "Set unroll threshold " << Threshold
246 << " for loop:\n"
247 << *L << " due to " << *GEP << '\n');
248 if (UP.Threshold >= MaxBoost)
249 return;
250 }
251
    // If we got a GEP in a small BB from an inner loop, then increase the max
    // trip count to analyze for a better cost estimation in the unroller.
254 if (L->isInnermost() && BB->size() < UnrollMaxBlockToAnalyze)
255 UP.MaxIterationsCountToAnalyze = 32;
256 }
257 }
258
void AMDGPUTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::PeelingPreferences &PP) {
261 BaseT::getPeelingPreferences(L, SE, PP);
262 }
263
264 const FeatureBitset GCNTTIImpl::InlineFeatureIgnoreList = {
265 // Codegen control options which don't matter.
266 AMDGPU::FeatureEnableLoadStoreOpt, AMDGPU::FeatureEnableSIScheduler,
267 AMDGPU::FeatureEnableUnsafeDSOffsetFolding, AMDGPU::FeatureFlatForGlobal,
268 AMDGPU::FeaturePromoteAlloca, AMDGPU::FeatureUnalignedScratchAccess,
269 AMDGPU::FeatureUnalignedAccessMode,
270
271 AMDGPU::FeatureAutoWaitcntBeforeBarrier,
272
273 // Property of the kernel/environment which can't actually differ.
274 AMDGPU::FeatureSGPRInitBug, AMDGPU::FeatureXNACK,
275 AMDGPU::FeatureTrapHandler,
276
    // The default assumption needs to be that ECC is enabled, but no directly
    // exposed operations depend on it, so it can be safely inlined.
279 AMDGPU::FeatureSRAMECC,
280
281 // Perf-tuning features
282 AMDGPU::FeatureFastFMAF32, AMDGPU::HalfRate64Ops};
283
GCNTTIImpl::GCNTTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
285 : BaseT(TM, F.getParent()->getDataLayout()),
286 ST(static_cast<const GCNSubtarget *>(TM->getSubtargetImpl(F))),
287 TLI(ST->getTargetLowering()), CommonTTI(TM, F),
288 IsGraphics(AMDGPU::isGraphics(F.getCallingConv())),
289 MaxVGPRs(ST->getMaxNumVGPRs(
290 std::max(ST->getWavesPerEU(F).first,
291 ST->getWavesPerEUForWorkGroup(
292 ST->getFlatWorkGroupSizes(F).second)))) {
293 AMDGPU::SIModeRegisterDefaults Mode(F);
294 HasFP32Denormals = Mode.allFP32Denormals();
295 HasFP64FP16Denormals = Mode.allFP64FP16Denormals();
296 }
297
unsigned GCNTTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
299 // The concept of vector registers doesn't really exist. Some packed vector
300 // operations operate on the normal 32-bit registers.
301 return MaxVGPRs;
302 }
303
unsigned GCNTTIImpl::getNumberOfRegisters(bool Vec) const {
305 // This is really the number of registers to fill when vectorizing /
306 // interleaving loops, so we lie to avoid trying to use all registers.
307 return getHardwareNumberOfRegisters(Vec) >> 3;
308 }
309
unsigned GCNTTIImpl::getNumberOfRegisters(unsigned RCID) const {
311 const SIRegisterInfo *TRI = ST->getRegisterInfo();
312 const TargetRegisterClass *RC = TRI->getRegClass(RCID);
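  // A value of this register class occupies (RegSizeInBits + 31) / 32 32-bit
  // VGPRs, so scale the hardware register budget down accordingly.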
313 unsigned NumVGPRs = (TRI->getRegSizeInBits(*RC) + 31) / 32;
314 return getHardwareNumberOfRegisters(false) / NumVGPRs;
315 }
316
317 TypeSize
GCNTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
319 switch (K) {
320 case TargetTransformInfo::RGK_Scalar:
321 return TypeSize::getFixed(32);
322 case TargetTransformInfo::RGK_FixedWidthVector:
323 return TypeSize::getFixed(ST->hasPackedFP32Ops() ? 64 : 32);
324 case TargetTransformInfo::RGK_ScalableVector:
325 return TypeSize::getScalable(0);
326 }
327 llvm_unreachable("Unsupported register kind");
328 }
329
unsigned GCNTTIImpl::getMinVectorRegisterBitWidth() const {
331 return 32;
332 }
333
unsigned GCNTTIImpl::getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
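  // For loads and stores, allow vectorization up to a 128-bit (4 x dword)
  // access.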
335 if (Opcode == Instruction::Load || Opcode == Instruction::Store)
336 return 32 * 4 / ElemWidth;
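  // Packed 16-bit operations, and packed FP32 operations on subtargets that
  // support them, process two elements per instruction; otherwise stay scalar.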
337 return (ElemWidth == 16 && ST->has16BitInsts()) ? 2
338 : (ElemWidth == 32 && ST->hasPackedFP32Ops()) ? 2
339 : 1;
340 }
341
unsigned GCNTTIImpl::getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                         unsigned ChainSizeInBytes,
                                         VectorType *VecTy) const {
345 unsigned VecRegBitWidth = VF * LoadSize;
346 if (VecRegBitWidth > 128 && VecTy->getScalarSizeInBits() < 32)
347 // TODO: Support element-size less than 32bit?
348 return 128 / LoadSize;
349
350 return VF;
351 }
352
unsigned GCNTTIImpl::getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                          unsigned ChainSizeInBytes,
                                          VectorType *VecTy) const {
356 unsigned VecRegBitWidth = VF * StoreSize;
357 if (VecRegBitWidth > 128)
358 return 128 / StoreSize;
359
360 return VF;
361 }
362
unsigned GCNTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
364 if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
365 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS ||
366 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
367 AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER) {
368 return 512;
369 }
370
371 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
372 return 8 * ST->getMaxPrivateElementSize();
373
374 // Common to flat, global, local and region. Assume for unknown addrspace.
375 return 128;
376 }
377
bool GCNTTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const {
381 // We allow vectorization of flat stores, even though we may need to decompose
382 // them later if they may access private memory. We don't have enough context
383 // here, and legalization can handle it.
384 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS) {
385 return (Alignment >= 4 || ST->hasUnalignedScratchAccess()) &&
386 ChainSizeInBytes <= ST->getMaxPrivateElementSize();
387 }
388 return true;
389 }
390
bool GCNTTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
394 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
395 }
396
bool GCNTTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
400 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
401 }
402
403 // FIXME: Really we would like to issue multiple 128-bit loads and stores per
404 // iteration. Should we report a larger size and let it legalize?
405 //
406 // FIXME: Should we use narrower types for local/region, or account for when
407 // unaligned access is legal?
408 //
409 // FIXME: This could use fine tuning and microbenchmarks.
Type *GCNTTIImpl::getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                            unsigned SrcAddrSpace,
                                            unsigned DestAddrSpace,
                                            unsigned SrcAlign,
                                            unsigned DestAlign) const {
415 unsigned MinAlign = std::min(SrcAlign, DestAlign);
416
417 // A (multi-)dword access at an address == 2 (mod 4) will be decomposed by the
418 // hardware into byte accesses. If you assume all alignments are equally
419 // probable, it's more efficient on average to use short accesses for this
420 // case.
421 if (MinAlign == 2)
422 return Type::getInt16Ty(Context);
423
424 // Not all subtargets have 128-bit DS instructions, and we currently don't
425 // form them by default.
426 if (SrcAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
427 SrcAddrSpace == AMDGPUAS::REGION_ADDRESS ||
428 DestAddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
429 DestAddrSpace == AMDGPUAS::REGION_ADDRESS) {
430 return FixedVectorType::get(Type::getInt32Ty(Context), 2);
431 }
432
433 // Global memory works best with 16-byte accesses. Private memory will also
434 // hit this, although they'll be decomposed.
435 return FixedVectorType::get(Type::getInt32Ty(Context), 4);
436 }
437
void GCNTTIImpl::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign) const {
442 assert(RemainingBytes < 16);
443
444 unsigned MinAlign = std::min(SrcAlign, DestAlign);
445
446 if (MinAlign != 2) {
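    // Unless the common alignment is exactly 2, greedily cover the residue
    // with 8-byte, then 4-byte chunks before falling back to i16/i8 below.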
447 Type *I64Ty = Type::getInt64Ty(Context);
448 while (RemainingBytes >= 8) {
449 OpsOut.push_back(I64Ty);
450 RemainingBytes -= 8;
451 }
452
453 Type *I32Ty = Type::getInt32Ty(Context);
454 while (RemainingBytes >= 4) {
455 OpsOut.push_back(I32Ty);
456 RemainingBytes -= 4;
457 }
458 }
459
460 Type *I16Ty = Type::getInt16Ty(Context);
461 while (RemainingBytes >= 2) {
462 OpsOut.push_back(I16Ty);
463 RemainingBytes -= 2;
464 }
465
466 Type *I8Ty = Type::getInt8Ty(Context);
467 while (RemainingBytes) {
468 OpsOut.push_back(I8Ty);
469 --RemainingBytes;
470 }
471 }
472
unsigned GCNTTIImpl::getMaxInterleaveFactor(unsigned VF) {
474 // Disable unrolling if the loop is not vectorized.
475 // TODO: Enable this again.
476 if (VF == 1)
477 return 1;
478
479 return 8;
480 }
481
bool GCNTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                    MemIntrinsicInfo &Info) const {
484 switch (Inst->getIntrinsicID()) {
485 case Intrinsic::amdgcn_atomic_inc:
486 case Intrinsic::amdgcn_atomic_dec:
487 case Intrinsic::amdgcn_ds_ordered_add:
488 case Intrinsic::amdgcn_ds_ordered_swap:
489 case Intrinsic::amdgcn_ds_fadd:
490 case Intrinsic::amdgcn_ds_fmin:
491 case Intrinsic::amdgcn_ds_fmax: {
492 auto *Ordering = dyn_cast<ConstantInt>(Inst->getArgOperand(2));
493 auto *Volatile = dyn_cast<ConstantInt>(Inst->getArgOperand(4));
494 if (!Ordering || !Volatile)
495 return false; // Invalid.
496
497 unsigned OrderingVal = Ordering->getZExtValue();
498 if (OrderingVal > static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent))
499 return false;
500
501 Info.PtrVal = Inst->getArgOperand(0);
502 Info.Ordering = static_cast<AtomicOrdering>(OrderingVal);
503 Info.ReadMem = true;
504 Info.WriteMem = true;
505 Info.IsVolatile = !Volatile->isNullValue();
506 return true;
507 }
508 default:
509 return false;
510 }
511 }
512
InstructionCost GCNTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
519 EVT OrigTy = TLI->getValueType(DL, Ty);
520 if (!OrigTy.isSimple()) {
    // FIXME: We're having to query the throughput cost so that the basic
    // implementation tries to generate legalization and scalarization costs.
    // Maybe we could hoist the scalarization code here?
524 if (CostKind != TTI::TCK_CodeSize)
525 return BaseT::getArithmeticInstrCost(Opcode, Ty, TTI::TCK_RecipThroughput,
526 Opd1Info, Opd2Info, Opd1PropInfo,
527 Opd2PropInfo, Args, CxtI);
528 // Scalarization
529
530 // Check if any of the operands are vector operands.
531 int ISD = TLI->InstructionOpcodeToISD(Opcode);
532 assert(ISD && "Invalid opcode");
533
534 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
535
536 bool IsFloat = Ty->isFPOrFPVectorTy();
537 // Assume that floating point arithmetic operations cost twice as much as
538 // integer operations.
539 unsigned OpCost = (IsFloat ? 2 : 1);
540
541 if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
542 // The operation is legal. Assume it costs 1.
543 // TODO: Once we have extract/insert subvector cost we need to use them.
544 return LT.first * OpCost;
545 }
546
547 if (!TLI->isOperationExpand(ISD, LT.second)) {
548 // If the operation is custom lowered, then assume that the code is twice
549 // as expensive.
550 return LT.first * 2 * OpCost;
551 }
552
553 // Else, assume that we need to scalarize this op.
554 // TODO: If one of the types get legalized by splitting, handle this
555 // similarly to what getCastInstrCost() does.
556 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
557 unsigned Num = cast<FixedVectorType>(VTy)->getNumElements();
558 InstructionCost Cost = getArithmeticInstrCost(
559 Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
560 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
563 SmallVector<Type *> Tys(Args.size(), Ty);
564 return getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
565 }
566
567 // We don't know anything about this scalar instruction.
568 return OpCost;
569 }
570
571 // Legalize the type.
572 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
573 int ISD = TLI->InstructionOpcodeToISD(Opcode);
574
  // Because we don't have any legal vector operations, only legal types, we
  // need to account for split vectors.
577 unsigned NElts = LT.second.isVector() ?
578 LT.second.getVectorNumElements() : 1;
579
580 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
581
582 switch (ISD) {
583 case ISD::SHL:
584 case ISD::SRL:
585 case ISD::SRA:
586 if (SLT == MVT::i64)
587 return get64BitInstrCost(CostKind) * LT.first * NElts;
588
589 if (ST->has16BitInsts() && SLT == MVT::i16)
590 NElts = (NElts + 1) / 2;
591
592 // i32
593 return getFullRateInstrCost() * LT.first * NElts;
594 case ISD::ADD:
595 case ISD::SUB:
596 case ISD::AND:
597 case ISD::OR:
598 case ISD::XOR:
599 if (SLT == MVT::i64) {
600 // and, or and xor are typically split into 2 VALU instructions.
601 return 2 * getFullRateInstrCost() * LT.first * NElts;
602 }
603
604 if (ST->has16BitInsts() && SLT == MVT::i16)
605 NElts = (NElts + 1) / 2;
606
607 return LT.first * NElts * getFullRateInstrCost();
608 case ISD::MUL: {
609 const int QuarterRateCost = getQuarterRateInstrCost(CostKind);
610 if (SLT == MVT::i64) {
611 const int FullRateCost = getFullRateInstrCost();
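      // Charge a 64-bit multiply as 4 quarter-rate 32-bit multiplies plus
      // 4 full-rate adds; this is a rough estimate of the expanded sequence.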
612 return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
613 }
614
615 if (ST->has16BitInsts() && SLT == MVT::i16)
616 NElts = (NElts + 1) / 2;
617
618 // i32
619 return QuarterRateCost * NElts * LT.first;
620 }
621 case ISD::FMUL:
    // Check for a possible fusion of {fadd|fsub}(a, fmul(b, c)) and return a
    // zero cost for the fmul(b, c), assuming the fadd|fsub will be charged the
    // cost of the whole fused operation.
625 if (CxtI && CxtI->hasOneUse())
626 if (const auto *FAdd = dyn_cast<BinaryOperator>(*CxtI->user_begin())) {
627 const int OPC = TLI->InstructionOpcodeToISD(FAdd->getOpcode());
628 if (OPC == ISD::FADD || OPC == ISD::FSUB) {
629 if (ST->hasMadMacF32Insts() && SLT == MVT::f32 && !HasFP32Denormals)
630 return TargetTransformInfo::TCC_Free;
631 if (ST->has16BitInsts() && SLT == MVT::f16 && !HasFP64FP16Denormals)
632 return TargetTransformInfo::TCC_Free;
633
          // Assume all types may be fused when contract or unsafe-math flags
          // are present.
635 const TargetOptions &Options = TLI->getTargetMachine().Options;
636 if (Options.AllowFPOpFusion == FPOpFusion::Fast ||
637 Options.UnsafeFPMath ||
638 (FAdd->hasAllowContract() && CxtI->hasAllowContract()))
639 return TargetTransformInfo::TCC_Free;
640 }
641 }
642 LLVM_FALLTHROUGH;
643 case ISD::FADD:
644 case ISD::FSUB:
645 if (ST->hasPackedFP32Ops() && SLT == MVT::f32)
646 NElts = (NElts + 1) / 2;
647 if (SLT == MVT::f64)
648 return LT.first * NElts * get64BitInstrCost(CostKind);
649
650 if (ST->has16BitInsts() && SLT == MVT::f16)
651 NElts = (NElts + 1) / 2;
652
653 if (SLT == MVT::f32 || SLT == MVT::f16)
654 return LT.first * NElts * getFullRateInstrCost();
655 break;
656 case ISD::FDIV:
657 case ISD::FREM:
658 // FIXME: frem should be handled separately. The fdiv in it is most of it,
659 // but the current lowering is also not entirely correct.
660 if (SLT == MVT::f64) {
661 int Cost = 7 * get64BitInstrCost(CostKind) +
662 getQuarterRateInstrCost(CostKind) +
663 3 * getHalfRateInstrCost(CostKind);
664 // Add cost of workaround.
665 if (!ST->hasUsableDivScaleConditionOutput())
666 Cost += 3 * getFullRateInstrCost();
667
668 return LT.first * Cost * NElts;
669 }
670
671 if (!Args.empty() && match(Args[0], PatternMatch::m_FPOne())) {
672 // TODO: This is more complicated, unsafe flags etc.
673 if ((SLT == MVT::f32 && !HasFP32Denormals) ||
674 (SLT == MVT::f16 && ST->has16BitInsts())) {
675 return LT.first * getQuarterRateInstrCost(CostKind) * NElts;
676 }
677 }
678
679 if (SLT == MVT::f16 && ST->has16BitInsts()) {
680 // 2 x v_cvt_f32_f16
681 // f32 rcp
682 // f32 fmul
683 // v_cvt_f16_f32
684 // f16 div_fixup
685 int Cost =
686 4 * getFullRateInstrCost() + 2 * getQuarterRateInstrCost(CostKind);
687 return LT.first * Cost * NElts;
688 }
689
690 if (SLT == MVT::f32 || SLT == MVT::f16) {
691 // 4 more v_cvt_* insts without f16 insts support
692 int Cost = (SLT == MVT::f16 ? 14 : 10) * getFullRateInstrCost() +
693 1 * getQuarterRateInstrCost(CostKind);
694
695 if (!HasFP32Denormals) {
696 // FP mode switches.
697 Cost += 2 * getFullRateInstrCost();
698 }
699
700 return LT.first * NElts * Cost;
701 }
702 break;
703 case ISD::FNEG:
    // Use the backend's estimation. If fneg is not free, each element will
    // cost one additional instruction.
706 return TLI->isFNegFree(SLT) ? 0 : NElts;
707 default:
708 break;
709 }
710
711 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
712 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
713 }
714
715 // Return true if there's a potential benefit from using v2f16/v2i16
716 // instructions for an intrinsic, even if it requires nontrivial legalization.
static bool intrinsicHasPackedVectorBenefit(Intrinsic::ID ID) {
718 switch (ID) {
719 case Intrinsic::fma: // TODO: fmuladd
720 // There's a small benefit to using vector ops in the legalized code.
721 case Intrinsic::round:
722 case Intrinsic::uadd_sat:
723 case Intrinsic::usub_sat:
724 case Intrinsic::sadd_sat:
725 case Intrinsic::ssub_sat:
726 return true;
727 default:
728 return false;
729 }
730 }
731
732 InstructionCost
GCNTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                  TTI::TargetCostKind CostKind) {
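  // fabs is treated as free; it typically folds into a source modifier on the
  // consuming instruction.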
735 if (ICA.getID() == Intrinsic::fabs)
736 return 0;
737
738 if (!intrinsicHasPackedVectorBenefit(ICA.getID()))
739 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
740
741 Type *RetTy = ICA.getReturnType();
742 EVT OrigTy = TLI->getValueType(DL, RetTy);
743 if (!OrigTy.isSimple()) {
744 if (CostKind != TTI::TCK_CodeSize)
745 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
746
747 // TODO: Combine these two logic paths.
748 if (ICA.isTypeBasedOnly())
749 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
750
751 unsigned RetVF =
752 (RetTy->isVectorTy() ? cast<FixedVectorType>(RetTy)->getNumElements()
753 : 1);
754 const IntrinsicInst *I = ICA.getInst();
755 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
756 FastMathFlags FMF = ICA.getFlags();
757 // Assume that we need to scalarize this intrinsic.
758
759 // Compute the scalarization overhead based on Args for a vector
760 // intrinsic. A vectorizer will pass a scalar RetTy and VF > 1, while
761 // CostModel will pass a vector RetTy and VF is 1.
762 InstructionCost ScalarizationCost = InstructionCost::getInvalid();
763 if (RetVF > 1) {
764 ScalarizationCost = 0;
765 if (!RetTy->isVoidTy())
766 ScalarizationCost +=
767 getScalarizationOverhead(cast<VectorType>(RetTy), true, false);
768 ScalarizationCost +=
769 getOperandsScalarizationOverhead(Args, ICA.getArgTypes());
770 }
771
772 IntrinsicCostAttributes Attrs(ICA.getID(), RetTy, ICA.getArgTypes(), FMF, I,
773 ScalarizationCost);
774 return getIntrinsicInstrCost(Attrs, CostKind);
775 }
776
777 // Legalize the type.
778 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
779
780 unsigned NElts = LT.second.isVector() ?
781 LT.second.getVectorNumElements() : 1;
782
783 MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
784
785 if (SLT == MVT::f64)
786 return LT.first * NElts * get64BitInstrCost(CostKind);
787
788 if ((ST->has16BitInsts() && SLT == MVT::f16) ||
789 (ST->hasPackedFP32Ops() && SLT == MVT::f32))
790 NElts = (NElts + 1) / 2;
791
792 // TODO: Get more refined intrinsic costs?
793 unsigned InstRate = getQuarterRateInstrCost(CostKind);
794
795 switch (ICA.getID()) {
796 case Intrinsic::fma:
797 InstRate = ST->hasFastFMAF32() ? getHalfRateInstrCost(CostKind)
798 : getQuarterRateInstrCost(CostKind);
799 break;
800 case Intrinsic::uadd_sat:
801 case Intrinsic::usub_sat:
802 case Intrinsic::sadd_sat:
803 case Intrinsic::ssub_sat:
804 static const auto ValidSatTys = {MVT::v2i16, MVT::v4i16};
    if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
806 NElts = 1;
807 break;
808 }
809
810 return LT.first * NElts * InstRate;
811 }
812
InstructionCost GCNTTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
816 assert((I == nullptr || I->getOpcode() == Opcode) &&
817 "Opcode should reflect passed instruction.");
818 const bool SCost =
819 (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency);
820 const int CBrCost = SCost ? 5 : 7;
821 switch (Opcode) {
822 case Instruction::Br: {
823 // Branch instruction takes about 4 slots on gfx900.
824 auto BI = dyn_cast_or_null<BranchInst>(I);
825 if (BI && BI->isUnconditional())
826 return SCost ? 1 : 4;
    // Assume a conditional branch takes an additional 3 exec-manipulation
    // instructions on average.
829 return CBrCost;
830 }
831 case Instruction::Switch: {
832 auto SI = dyn_cast_or_null<SwitchInst>(I);
    // Each case (including the default) takes 1 cmp + 1 cbr instruction on
    // average.
835 return (SI ? (SI->getNumCases() + 1) : 4) * (CBrCost + 1);
836 }
837 case Instruction::Ret:
838 return SCost ? 1 : 10;
839 case Instruction::PHI:
840 // TODO: 1. A prediction phi won't be eliminated?
841 // 2. Estimate data copy instructions in this case.
842 return 1;
843 }
844 return BaseT::getCFInstrCost(Opcode, CostKind, I);
845 }
846
847 InstructionCost
GCNTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                       bool IsPairwise,
                                       TTI::TargetCostKind CostKind) {
851 EVT OrigTy = TLI->getValueType(DL, Ty);
852
  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
855 if (IsPairwise ||
856 !ST->hasVOP3PInsts() ||
857 OrigTy.getScalarSizeInBits() != 16)
858 return BaseT::getArithmeticReductionCost(Opcode, Ty, IsPairwise, CostKind);
859
860 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
861 return LT.first * getFullRateInstrCost();
862 }
863
864 InstructionCost
GCNTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                   bool IsPairwise, bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
868 EVT OrigTy = TLI->getValueType(DL, Ty);
869
  // Compute the cost on targets that have packed math instructions (which
  // support 16-bit types only).
872 if (IsPairwise ||
873 !ST->hasVOP3PInsts() ||
874 OrigTy.getScalarSizeInBits() != 16)
875 return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
876 CostKind);
877
878 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
879 return LT.first * getHalfRateInstrCost(CostKind);
880 }
881
InstructionCost GCNTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                               unsigned Index) {
884 switch (Opcode) {
885 case Instruction::ExtractElement:
886 case Instruction::InsertElement: {
887 unsigned EltSize
888 = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
889 if (EltSize < 32) {
890 if (EltSize == 16 && Index == 0 && ST->has16BitInsts())
891 return 0;
892 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
893 }
894
895 // Extracts are just reads of a subregister, so are free. Inserts are
896 // considered free because we don't want to have any cost for scalarizing
897 // operations, and we don't have to copy into a different register class.
898
899 // Dynamic indexing isn't free and is best avoided.
900 return Index == ~0u ? 2 : 0;
901 }
902 default:
903 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
904 }
905 }
906
907 /// Analyze if the results of inline asm are divergent. If \p Indices is empty,
908 /// this is analyzing the collective result of all output registers. Otherwise,
909 /// this is only querying a specific result index if this returns multiple
910 /// registers in a struct.
bool GCNTTIImpl::isInlineAsmSourceOfDivergence(
    const CallInst *CI, ArrayRef<unsigned> Indices) const {
913 // TODO: Handle complex extract indices
914 if (Indices.size() > 1)
915 return true;
916
917 const DataLayout &DL = CI->getModule()->getDataLayout();
918 const SIRegisterInfo *TRI = ST->getRegisterInfo();
919 TargetLowering::AsmOperandInfoVector TargetConstraints =
920 TLI->ParseConstraints(DL, ST->getRegisterInfo(), *CI);
921
922 const int TargetOutputIdx = Indices.empty() ? -1 : Indices[0];
923
924 int OutputIdx = 0;
925 for (auto &TC : TargetConstraints) {
926 if (TC.Type != InlineAsm::isOutput)
927 continue;
928
929 // Skip outputs we don't care about.
930 if (TargetOutputIdx != -1 && TargetOutputIdx != OutputIdx++)
931 continue;
932
933 TLI->ComputeConstraintToUse(TC, SDValue());
934
935 Register AssignedReg;
936 const TargetRegisterClass *RC;
937 std::tie(AssignedReg, RC) = TLI->getRegForInlineAsmConstraint(
938 TRI, TC.ConstraintCode, TC.ConstraintVT);
939 if (AssignedReg) {
940 // FIXME: This is a workaround for getRegForInlineAsmConstraint
941 // returning VS_32
942 RC = TRI->getPhysRegClass(AssignedReg);
943 }
944
945 // For AGPR constraints null is returned on subtargets without AGPRs, so
946 // assume divergent for null.
947 if (!RC || !TRI->isSGPRClass(RC))
948 return true;
949 }
950
951 return false;
952 }
953
954 /// \returns true if the new GPU divergence analysis is enabled.
bool GCNTTIImpl::useGPUDivergenceAnalysis() const {
956 return !UseLegacyDA;
957 }
958
959 /// \returns true if the result of the value could potentially be
960 /// different across workitems in a wavefront.
bool GCNTTIImpl::isSourceOfDivergence(const Value *V) const {
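  // Arguments passed in SGPRs are uniform across a wavefront; arguments passed
  // in VGPRs may differ per lane.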
962 if (const Argument *A = dyn_cast<Argument>(V))
963 return !AMDGPU::isArgPassedInSGPR(A);
964
965 // Loads from the private and flat address spaces are divergent, because
966 // threads can execute the load instruction with the same inputs and get
967 // different results.
968 //
969 // All other loads are not divergent, because if threads issue loads with the
970 // same arguments, they will always get the same result.
971 if (const LoadInst *Load = dyn_cast<LoadInst>(V))
972 return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS ||
973 Load->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS;
974
  // Atomics are divergent because they are executed sequentially: when an
  // atomic operation refers to the same address in each thread, then each
  // thread after the first sees the value written by the previous thread as
  // the original value.
979 if (isa<AtomicRMWInst>(V) || isa<AtomicCmpXchgInst>(V))
980 return true;
981
982 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V))
983 return AMDGPU::isIntrinsicSourceOfDivergence(Intrinsic->getIntrinsicID());
984
985 // Assume all function calls are a source of divergence.
986 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
987 if (CI->isInlineAsm())
988 return isInlineAsmSourceOfDivergence(CI);
989 return true;
990 }
991
992 // Assume all function calls are a source of divergence.
993 if (isa<InvokeInst>(V))
994 return true;
995
996 return false;
997 }
998
bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
1000 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
1001 switch (Intrinsic->getIntrinsicID()) {
1002 default:
1003 return false;
1004 case Intrinsic::amdgcn_readfirstlane:
1005 case Intrinsic::amdgcn_readlane:
1006 case Intrinsic::amdgcn_icmp:
1007 case Intrinsic::amdgcn_fcmp:
1008 case Intrinsic::amdgcn_ballot:
1009 case Intrinsic::amdgcn_if_break:
1010 return true;
1011 }
1012 }
1013
1014 if (const CallInst *CI = dyn_cast<CallInst>(V)) {
1015 if (CI->isInlineAsm())
1016 return !isInlineAsmSourceOfDivergence(CI);
1017 return false;
1018 }
1019
1020 const ExtractValueInst *ExtValue = dyn_cast<ExtractValueInst>(V);
1021 if (!ExtValue)
1022 return false;
1023
1024 const CallInst *CI = dyn_cast<CallInst>(ExtValue->getOperand(0));
1025 if (!CI)
1026 return false;
1027
1028 if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(CI)) {
1029 switch (Intrinsic->getIntrinsicID()) {
1030 default:
1031 return false;
1032 case Intrinsic::amdgcn_if:
1033 case Intrinsic::amdgcn_else: {
1034 ArrayRef<unsigned> Indices = ExtValue->getIndices();
1035 return Indices.size() == 1 && Indices[0] == 1;
1036 }
1037 }
1038 }
1039
1040 // If we have inline asm returning mixed SGPR and VGPR results, we inferred
1041 // divergent for the overall struct return. We need to override it in the
1042 // case we're extracting an SGPR component here.
1043 if (CI->isInlineAsm())
1044 return !isInlineAsmSourceOfDivergence(CI, ExtValue->getIndices());
1045
1046 return false;
1047 }
1048
bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                            Intrinsic::ID IID) const {
1051 switch (IID) {
1052 case Intrinsic::amdgcn_atomic_inc:
1053 case Intrinsic::amdgcn_atomic_dec:
1054 case Intrinsic::amdgcn_ds_fadd:
1055 case Intrinsic::amdgcn_ds_fmin:
1056 case Intrinsic::amdgcn_ds_fmax:
1057 case Intrinsic::amdgcn_is_shared:
1058 case Intrinsic::amdgcn_is_private:
1059 OpIndexes.push_back(0);
1060 return true;
1061 default:
1062 return false;
1063 }
1064 }
1065
Value *GCNTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                    Value *OldV,
                                                    Value *NewV) const {
1069 auto IntrID = II->getIntrinsicID();
1070 switch (IntrID) {
1071 case Intrinsic::amdgcn_atomic_inc:
1072 case Intrinsic::amdgcn_atomic_dec:
1073 case Intrinsic::amdgcn_ds_fadd:
1074 case Intrinsic::amdgcn_ds_fmin:
1075 case Intrinsic::amdgcn_ds_fmax: {
1076 const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
1077 if (!IsVolatile->isZero())
1078 return nullptr;
1079 Module *M = II->getParent()->getParent()->getParent();
1080 Type *DestTy = II->getType();
1081 Type *SrcTy = NewV->getType();
1082 Function *NewDecl =
1083 Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
1084 II->setArgOperand(0, NewV);
1085 II->setCalledFunction(NewDecl);
1086 return II;
1087 }
1088 case Intrinsic::amdgcn_is_shared:
1089 case Intrinsic::amdgcn_is_private: {
1090 unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
1091 AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
1092 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1093 LLVMContext &Ctx = NewV->getType()->getContext();
1094 ConstantInt *NewVal = (TrueAS == NewAS) ?
1095 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
1096 return NewVal;
1097 }
1098 case Intrinsic::ptrmask: {
1099 unsigned OldAS = OldV->getType()->getPointerAddressSpace();
1100 unsigned NewAS = NewV->getType()->getPointerAddressSpace();
1101 Value *MaskOp = II->getArgOperand(1);
1102 Type *MaskTy = MaskOp->getType();
1103
1104 bool DoTruncate = false;
1105
1106 const GCNTargetMachine &TM =
1107 static_cast<const GCNTargetMachine &>(getTLI()->getTargetMachine());
1108 if (!TM.isNoopAddrSpaceCast(OldAS, NewAS)) {
1109 // All valid 64-bit to 32-bit casts work by chopping off the high
1110 // bits. Any masking only clearing the low bits will also apply in the new
1111 // address space.
1112 if (DL.getPointerSizeInBits(OldAS) != 64 ||
1113 DL.getPointerSizeInBits(NewAS) != 32)
1114 return nullptr;
1115
1116 // TODO: Do we need to thread more context in here?
1117 KnownBits Known = computeKnownBits(MaskOp, DL, 0, nullptr, II);
1118 if (Known.countMinLeadingOnes() < 32)
1119 return nullptr;
1120
1121 DoTruncate = true;
1122 }
1123
1124 IRBuilder<> B(II);
1125 if (DoTruncate) {
1126 MaskTy = B.getInt32Ty();
1127 MaskOp = B.CreateTrunc(MaskOp, MaskTy);
1128 }
1129
1130 return B.CreateIntrinsic(Intrinsic::ptrmask, {NewV->getType(), MaskTy},
1131 {NewV, MaskOp});
1132 }
1133 default:
1134 return nullptr;
1135 }
1136 }
1137
InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *VT, ArrayRef<int> Mask,
                                           int Index, VectorType *SubTp) {
1141 Kind = improveShuffleKindFromMask(Kind, Mask);
1142 if (ST->hasVOP3PInsts()) {
1143 if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
1144 DL.getTypeSizeInBits(VT->getElementType()) == 16) {
      // With op_sel, VOP3P instructions can freely access either the low or
      // the high half of a register, so any swizzle is free.
1147
1148 switch (Kind) {
1149 case TTI::SK_Broadcast:
1150 case TTI::SK_Reverse:
1151 case TTI::SK_PermuteSingleSrc:
1152 return 0;
1153 default:
1154 break;
1155 }
1156 }
1157 }
1158
1159 return BaseT::getShuffleCost(Kind, VT, Mask, Index, SubTp);
1160 }
1161
bool GCNTTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
1164 const TargetMachine &TM = getTLI()->getTargetMachine();
1165 const GCNSubtarget *CallerST
1166 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Caller));
1167 const GCNSubtarget *CalleeST
1168 = static_cast<const GCNSubtarget *>(TM.getSubtargetImpl(*Callee));
1169
1170 const FeatureBitset &CallerBits = CallerST->getFeatureBits();
1171 const FeatureBitset &CalleeBits = CalleeST->getFeatureBits();
1172
1173 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
1174 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
1175 if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
1176 return false;
1177
1178 // FIXME: dx10_clamp can just take the caller setting, but there seems to be
1179 // no way to support merge for backend defined attributes.
1180 AMDGPU::SIModeRegisterDefaults CallerMode(*Caller);
1181 AMDGPU::SIModeRegisterDefaults CalleeMode(*Callee);
1182 if (!CallerMode.isInlineCompatible(CalleeMode))
1183 return false;
1184
1185 if (Callee->hasFnAttribute(Attribute::AlwaysInline) ||
1186 Callee->hasFnAttribute(Attribute::InlineHint))
1187 return true;
1188
1189 // Hack to make compile times reasonable.
1190 if (InlineMaxBB) {
1191 // Single BB does not increase total BB amount.
1192 if (Callee->size() == 1)
1193 return true;
1194 size_t BBSize = Caller->size() + Callee->size() - 1;
1195 return BBSize <= InlineMaxBB;
1196 }
1197
1198 return true;
1199 }
1200
unsigned GCNTTIImpl::adjustInliningThreshold(const CallBase *CB) const {
  // If a pointer to a private array is passed into a function it will not be
  // optimized out, leaving scratch usage.
  // Increase the inline threshold to allow inlining in this case.
1205 uint64_t AllocaSize = 0;
1206 SmallPtrSet<const AllocaInst *, 8> AIVisited;
1207 for (Value *PtrArg : CB->args()) {
1208 PointerType *Ty = dyn_cast<PointerType>(PtrArg->getType());
1209 if (!Ty || (Ty->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS &&
1210 Ty->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS))
1211 continue;
1212
1213 PtrArg = getUnderlyingObject(PtrArg);
1214 if (const AllocaInst *AI = dyn_cast<AllocaInst>(PtrArg)) {
1215 if (!AI->isStaticAlloca() || !AIVisited.insert(AI).second)
1216 continue;
1217 AllocaSize += DL.getTypeAllocSize(AI->getAllocatedType());
1218 // If the amount of stack memory is excessive we will not be able
1219 // to get rid of the scratch anyway, bail out.
1220 if (AllocaSize > ArgAllocaCutoff) {
1221 AllocaSize = 0;
1222 break;
1223 }
1224 }
1225 }
1226 if (AllocaSize)
1227 return ArgAllocaCost;
1228 return 0;
1229 }
1230
void GCNTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
1233 CommonTTI.getUnrollingPreferences(L, SE, UP);
1234 }
1235
void GCNTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                       TTI::PeelingPreferences &PP) {
1238 CommonTTI.getPeelingPreferences(L, SE, PP);
1239 }
1240
int GCNTTIImpl::get64BitInstrCost(TTI::TargetCostKind CostKind) const {
1242 return ST->hasFullRate64Ops()
1243 ? getFullRateInstrCost()
1244 : ST->hasHalfRate64Ops() ? getHalfRateInstrCost(CostKind)
1245 : getQuarterRateInstrCost(CostKind);
1246 }
1247
R600TTIImpl::R600TTIImpl(const AMDGPUTargetMachine *TM, const Function &F)
1249 : BaseT(TM, F.getParent()->getDataLayout()),
1250 ST(static_cast<const R600Subtarget *>(TM->getSubtargetImpl(F))),
1251 TLI(ST->getTargetLowering()), CommonTTI(TM, F) {}
1252
unsigned R600TTIImpl::getHardwareNumberOfRegisters(bool Vec) const {
1254 return 4 * 128; // XXX - 4 channels. Should these count as vector instead?
1255 }
1256
unsigned R600TTIImpl::getNumberOfRegisters(bool Vec) const {
1258 return getHardwareNumberOfRegisters(Vec);
1259 }
1260
1261 TypeSize
R600TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
1263 return TypeSize::getFixed(32);
1264 }
1265
unsigned R600TTIImpl::getMinVectorRegisterBitWidth() const {
1267 return 32;
1268 }
1269
unsigned R600TTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
1271 if (AddrSpace == AMDGPUAS::GLOBAL_ADDRESS ||
1272 AddrSpace == AMDGPUAS::CONSTANT_ADDRESS)
1273 return 128;
1274 if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
1275 AddrSpace == AMDGPUAS::REGION_ADDRESS)
1276 return 64;
1277 if (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS)
1278 return 32;
1279
1280 if ((AddrSpace == AMDGPUAS::PARAM_D_ADDRESS ||
1281 AddrSpace == AMDGPUAS::PARAM_I_ADDRESS ||
1282 (AddrSpace >= AMDGPUAS::CONSTANT_BUFFER_0 &&
1283 AddrSpace <= AMDGPUAS::CONSTANT_BUFFER_15)))
1284 return 128;
1285 llvm_unreachable("unhandled address space");
1286 }
1287
bool R600TTIImpl::isLegalToVectorizeMemChain(unsigned ChainSizeInBytes,
                                             Align Alignment,
                                             unsigned AddrSpace) const {
1291 // We allow vectorization of flat stores, even though we may need to decompose
1292 // them later if they may access private memory. We don't have enough context
1293 // here, and legalization can handle it.
1294 return (AddrSpace != AMDGPUAS::PRIVATE_ADDRESS);
1295 }
1296
bool R600TTIImpl::isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                              Align Alignment,
                                              unsigned AddrSpace) const {
1300 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
1301 }
1302
bool R600TTIImpl::isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                               Align Alignment,
                                               unsigned AddrSpace) const {
1306 return isLegalToVectorizeMemChain(ChainSizeInBytes, Alignment, AddrSpace);
1307 }
1308
unsigned R600TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1310 // Disable unrolling if the loop is not vectorized.
1311 // TODO: Enable this again.
1312 if (VF == 1)
1313 return 1;
1314
1315 return 8;
1316 }
1317
InstructionCost R600TTIImpl::getCFInstrCost(unsigned Opcode,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
1321 if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1322 return Opcode == Instruction::PHI ? 0 : 1;
1323
1324 // XXX - For some reason this isn't called for switch.
1325 switch (Opcode) {
1326 case Instruction::Br:
1327 case Instruction::Ret:
1328 return 10;
1329 default:
1330 return BaseT::getCFInstrCost(Opcode, CostKind, I);
1331 }
1332 }
1333
InstructionCost R600TTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                                unsigned Index) {
1336 switch (Opcode) {
1337 case Instruction::ExtractElement:
1338 case Instruction::InsertElement: {
1339 unsigned EltSize
1340 = DL.getTypeSizeInBits(cast<VectorType>(ValTy)->getElementType());
1341 if (EltSize < 32) {
1342 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
1343 }
1344
1345 // Extracts are just reads of a subregister, so are free. Inserts are
1346 // considered free because we don't want to have any cost for scalarizing
1347 // operations, and we don't have to copy into a different register class.
1348
1349 // Dynamic indexing isn't free and is best avoided.
1350 return Index == ~0u ? 2 : 0;
1351 }
1352 default:
1353 return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
1354 }
1355 }
1356
void R600TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                          TTI::UnrollingPreferences &UP) {
1359 CommonTTI.getUnrollingPreferences(L, SE, UP);
1360 }
1361
void R600TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                        TTI::PeelingPreferences &PP) {
1364 CommonTTI.getPeelingPreferences(L, SE, PP);
1365 }
1366