//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "tti"

static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
                                     cl::Hidden,
                                     cl::desc("Recognize reduction patterns."));

namespace {
/// No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
  explicit NoTTIImpl(const DataLayout &DL)
      : TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
} // namespace

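// Illustrative sketch (not from the original file): the smallest irreducible
// CFG that the check below rejects is a two-block cycle with two entries.
// Since neither block dominates the other, the cycle has no header and
// cannot be counted with a single branch-and-decrement:
//
//   entry:
//     br i1 %c, label %a, label %b
//   a:
//     br label %b
//   b:
//     br label %a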
bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) {
  // If the loop has irreducible control flow, it cannot be converted to a
  // hardware loop.
  LoopBlocksRPO RPOT(L);
  RPOT.perform(&LI);
  if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
    return false;
  return true;
}

IntrinsicCostAttributes::IntrinsicCostAttributes(
    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost)
    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
      ScalarizationCost(ScalarizationCost) {

  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
    FMF = FPMO->getFastMathFlags();

  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
                                                 ArrayRef<const Value *> Args)
    : RetTy(Ty), IID(Id) {

  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
  ParamTys.reserve(Arguments.size());
  for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
    ParamTys.push_back(Arguments[Idx]->getType());
}

IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                 ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys,
                                                 FastMathFlags Flags,
                                                 const IntrinsicInst *I,
                                                 InstructionCost ScalarCost)
    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
}

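// Worked example for the trip-count computation below (illustrative, not
// from the original file): for a loop such as
//
//   for (i = 0; i < 10; ++i) { ... }
//
// ScalarEvolution's exit count is 9 (the number of times the backedge is
// taken), so TripCount becomes 9 + 1 = 10, zero-extended when its type
// differs from CountType.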
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                               LoopInfo &LI, DominatorTree &DT,
                                               bool ForceNestedLoop,
                                               bool ForceHardwareLoopPHI) {
  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BasicBlock *BB : ExitingBlocks) {
    // If we pass the updated counter back through a phi, we need to know
    // which latch the updated value will be coming from.
    if (!L->isLoopLatch(BB)) {
      if (ForceHardwareLoopPHI || CounterInReg)
        continue;
    }

    const SCEV *EC = SE.getExitCount(L, BB);
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE.isLoopInvariant(EC, L))
      continue;

    if (SE.getTypeSizeInBits(EC->getType()) > CountType->getBitWidth())
      continue;

    // If this exiting block is contained in a nested loop, it is not eligible
    // for insertion of the branch-and-decrement since the inner loop would
    // end up messing up the value in the CTR.
    if (!IsNestingLegal && LI.getLoopFor(BB) != L && !ForceNestedLoop)
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (BasicBlock *Pred : predecessors(L->getHeader())) {
      if (!L->contains(Pred))
        continue;

      if (!DT.dominates(BB, Pred)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = BB->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      ExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    ExitBlock = BB;
    TripCount = SE.getAddExpr(EC, SE.getOne(EC->getType()));

    if (!EC->getType()->isPointerTy() && EC->getType() != CountType)
      TripCount = SE.getZeroExtendExpr(TripCount, CountType);

    break;
  }

  if (!ExitBlock)
    return false;
  return true;
}

TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
    : TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}

TargetTransformInfo::~TargetTransformInfo() {}

TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
    : TTIImpl(std::move(Arg.TTIImpl)) {}

TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
  TTIImpl = std::move(RHS.TTIImpl);
  return *this;
}

unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
  return TTIImpl->getInliningThresholdMultiplier();
}

unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
  return TTIImpl->adjustInliningThreshold(CB);
}

int TargetTransformInfo::getInlinerVectorBonusPercent() const {
  return TTIImpl->getInlinerVectorBonusPercent();
}

InstructionCost
TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
                                ArrayRef<const Value *> Operands,
                                TTI::TargetCostKind CostKind) const {
  return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
}

unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
    const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) const {
  return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
}

InstructionCost
TargetTransformInfo::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands,
                                 enum TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getUserCost(U, Operands, CostKind);
  assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
         "TTI should not produce negative costs!");
  return Cost;
}

BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
  return TTIImpl->getPredictableBranchThreshold();
}

bool TargetTransformInfo::hasBranchDivergence() const {
  return TTIImpl->hasBranchDivergence();
}

bool TargetTransformInfo::useGPUDivergenceAnalysis() const {
  return TTIImpl->useGPUDivergenceAnalysis();
}

bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
  return TTIImpl->isSourceOfDivergence(V);
}

bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
  return TTIImpl->isAlwaysUniform(V);
}

unsigned TargetTransformInfo::getFlatAddressSpace() const {
  return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
    SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
  return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
                                              unsigned ToAS) const {
  return TTIImpl->isNoopAddrSpaceCast(FromAS, ToAS);
}

unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
  return TTIImpl->getAssumedAddrSpace(V);
}

Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
    IntrinsicInst *II, Value *OldV, Value *NewV) const {
  return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return TTIImpl->isLoweredToCall(F);
}

bool TargetTransformInfo::isHardwareLoopProfitable(
    Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
  return TTIImpl->isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
}

bool TargetTransformInfo::preferPredicateOverEpilogue(
    Loop *L, LoopInfo *LI, ScalarEvolution &SE, AssumptionCache &AC,
    TargetLibraryInfo *TLI, DominatorTree *DT,
    const LoopAccessInfo *LAI) const {
  return TTIImpl->preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
}

bool TargetTransformInfo::emitGetActiveLaneMask() const {
  return TTIImpl->emitGetActiveLaneMask();
}

Optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
                                          IntrinsicInst &II) const {
  return TTIImpl->instCombineIntrinsic(IC, II);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {
  return TTIImpl->simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
}

Optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  return TTIImpl->simplifyDemandedVectorEltsIntrinsic(
      IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
      SimplifyAndSetOp);
}

void TargetTransformInfo::getUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP) const {
  return TTIImpl->getUnrollingPreferences(L, SE, UP);
}

void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                                PeelingPreferences &PP) const {
  return TTIImpl->getPeelingPreferences(L, SE, PP);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return TTIImpl->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return TTIImpl->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg, int64_t Scale,
                                                unsigned AddrSpace,
                                                Instruction *I) const {
  return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale, AddrSpace, I);
}

bool TargetTransformInfo::isLSRCostLess(LSRCost &C1, LSRCost &C2) const {
  return TTIImpl->isLSRCostLess(C1, C2);
}

bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
  return TTIImpl->isNumRegsMajorCostOfLSR();
}

bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
  return TTIImpl->isProfitableLSRChainElement(I);
}

bool TargetTransformInfo::canMacroFuseCmp() const {
  return TTIImpl->canMacroFuseCmp();
}

bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
                                     ScalarEvolution *SE, LoopInfo *LI,
                                     DominatorTree *DT, AssumptionCache *AC,
                                     TargetLibraryInfo *LibInfo) const {
  return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
}

TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const {
  return TTIImpl->getPreferredAddressingMode(L, SE);
}

bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
                                             Align Alignment) const {
  return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
                                            Align Alignment) const {
  return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTStore(Type *DataType,
                                         Align Alignment) const {
  return TTIImpl->isLegalNTStore(DataType, Alignment);
}

bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
  return TTIImpl->isLegalNTLoad(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
                                              Align Alignment) const {
  return TTIImpl->isLegalMaskedGather(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
                                               Align Alignment) const {
  return TTIImpl->isLegalMaskedScatter(DataType, Alignment);
}

bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType) const {
  return TTIImpl->isLegalMaskedCompressStore(DataType);
}

bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType) const {
  return TTIImpl->isLegalMaskedExpandLoad(DataType);
}

bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
  return TTIImpl->hasDivRemOp(DataType, IsSigned);
}

bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
                                             unsigned AddrSpace) const {
  return TTIImpl->hasVolatileVariant(I, AddrSpace);
}

bool TargetTransformInfo::prefersVectorizedAddressing() const {
  return TTIImpl->prefersVectorizedAddressing();
}

InstructionCost TargetTransformInfo::getScalingFactorCost(
    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
    int64_t Scale, unsigned AddrSpace) const {
  InstructionCost Cost = TTIImpl->getScalingFactorCost(
      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

bool TargetTransformInfo::LSRWithInstrQueries() const {
  return TTIImpl->LSRWithInstrQueries();
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TTIImpl->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
  return TTIImpl->isProfitableToHoist(I);
}

bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return TTIImpl->isTypeLegal(Ty);
}

InstructionCost TargetTransformInfo::getRegUsageForType(Type *Ty) const {
  return TTIImpl->getRegUsageForType(Ty);
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return TTIImpl->shouldBuildLookupTables();
}

bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
    Constant *C) const {
  return TTIImpl->shouldBuildLookupTablesForConstant(C);
}

bool TargetTransformInfo::shouldBuildRelLookupTables() const {
  return TTIImpl->shouldBuildRelLookupTables();
}

bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
  return TTIImpl->useColdCCForColdCall(F);
}

InstructionCost
TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) const {
  return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}

InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
}

bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
  return TTIImpl->supportsEfficientVectorElementLoadStore();
}

bool TargetTransformInfo::enableAggressiveInterleaving(
    bool LoopHasReductions) const {
  return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}

TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  return TTIImpl->enableMemCmpExpansion(OptSize, IsZeroCmp);
}

bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
  return TTIImpl->enableInterleavedAccessVectorization();
}

bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
  return TTIImpl->enableMaskedInterleavedAccessVectorization();
}

bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
  return TTIImpl->isFPVectorizationPotentiallyUnsafe();
}

bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                         unsigned BitWidth,
                                                         unsigned AddressSpace,
                                                         Align Alignment,
                                                         bool *Fast) const {
  return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                 AddressSpace, Alignment, Fast);
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}

bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
  return TTIImpl->haveFastSqrt(Ty);
}

bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
  return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
}

InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

int TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                               const APInt &Imm,
                                               Type *Ty) const {
  int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
                                   TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getIntImmCostInst(
    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
    TTI::TargetCostKind CostKind, Instruction *Inst) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                         const APInt &Imm, Type *Ty,
                                         TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
  return TTIImpl->getNumberOfRegisters(ClassID);
}

unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
                                                      Type *Ty) const {
  return TTIImpl->getRegisterClassForType(Vector, Ty);
}

const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
  return TTIImpl->getRegisterClassName(ClassID);
}

TypeSize TargetTransformInfo::getRegisterBitWidth(
    TargetTransformInfo::RegisterKind K) const {
  return TTIImpl->getRegisterBitWidth(K);
}

unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
  return TTIImpl->getMinVectorRegisterBitWidth();
}

Optional<unsigned> TargetTransformInfo::getMaxVScale() const {
  return TTIImpl->getMaxVScale();
}

bool TargetTransformInfo::shouldMaximizeVectorBandwidth() const {
  return TTIImpl->shouldMaximizeVectorBandwidth();
}

ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
                                               bool IsScalable) const {
  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
}

unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
                                           unsigned Opcode) const {
  return TTIImpl->getMaximumVF(ElemWidth, Opcode);
}

bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
  return TTIImpl->shouldConsiderAddressTypePromotion(
      I, AllowPromotionWithoutCommonHeader);
}

unsigned TargetTransformInfo::getCacheLineSize() const {
  return TTIImpl->getCacheLineSize();
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const {
  return TTIImpl->getCacheSize(Level);
}

llvm::Optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
  return TTIImpl->getCacheAssociativity(Level);
}

unsigned TargetTransformInfo::getPrefetchDistance() const {
  return TTIImpl->getPrefetchDistance();
}

unsigned TargetTransformInfo::getMinPrefetchStride(
    unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
    unsigned NumPrefetches, bool HasCall) const {
  return TTIImpl->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                       NumPrefetches, HasCall);
}

unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
  return TTIImpl->getMaxPrefetchIterationsAhead();
}

bool TargetTransformInfo::enableWritePrefetching() const {
  return TTIImpl->enableWritePrefetching();
}

unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
  return TTIImpl->getMaxInterleaveFactor(VF);
}

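// Worked example for getOperandInfo (illustrative IR): in
//
//   %a = mul <4 x i32> %x, <i32 8, i32 8, i32 8, i32 8>
//
// the constant operand is a splat of 8, so it is classified as
// OK_UniformConstantValue with OP_PowerOf2. A non-splat constant such as
// <i32 1, i32 2, i32 3, i32 4> would be OK_NonUniformConstantValue with
// OP_None, because 3 is not a power of two.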
TargetTransformInfo::OperandValueKind
TargetTransformInfo::getOperandInfo(const Value *V,
                                    OperandValueProperties &OpProps) {
  OperandValueKind OpInfo = OK_AnyValue;
  OpProps = OP_None;

  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().isPowerOf2())
      OpProps = OP_PowerOf2;
    return OK_UniformConstantValue;
  }

  // A broadcast shuffle creates a uniform value.
  // TODO: Add support for non-zero index broadcasts.
  // TODO: Add support for different source vector width.
  if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
    if (ShuffleInst->isZeroEltSplat())
      OpInfo = OK_UniformValue;

  const Value *Splat = getSplatValue(V);

  // Check for a splat of a constant or for a non-uniform vector of constants
  // and check if the constant(s) are all powers of two.
  if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
    OpInfo = OK_NonUniformConstantValue;
    if (Splat) {
      OpInfo = OK_UniformConstantValue;
      if (auto *CI = dyn_cast<ConstantInt>(Splat))
        if (CI->getValue().isPowerOf2())
          OpProps = OP_PowerOf2;
    } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
      OpProps = OP_PowerOf2;
      for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I) {
        if (auto *CI = dyn_cast<ConstantInt>(CDS->getElementAsConstant(I)))
          if (CI->getValue().isPowerOf2())
            continue;
        OpProps = OP_None;
        break;
      }
    }
  }

  // Check for a splat of a uniform value. This is not loop aware, so return
  // true only for the obviously uniform cases (argument, globalvalue).
  if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
    OpInfo = OK_UniformValue;

  return OpInfo;
}

InstructionCost TargetTransformInfo::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                      Opd1PropInfo, Opd2PropInfo, Args, CxtI);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getShuffleCost(ShuffleKind Kind,
                                                    VectorType *Ty,
                                                    ArrayRef<int> Mask,
                                                    int Index,
                                                    VectorType *SubTp) const {
  InstructionCost Cost = TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

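// Examples of the cast-context classification below (illustrative IR):
//
//   %l = load i16, i16* %p
//   %e = zext i16 %l to i32    ; extend of a plain load -> Normal
//
// An extend fed by @llvm.masked.load yields Masked, and one fed by
// @llvm.masked.gather yields GatherScatter. Truncates are classified by
// their single user instead: a trunc feeding @llvm.masked.scatter gets the
// GatherScatter hint.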
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
  if (!I)
    return CastContextHint::None;

  auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
                             unsigned GatScatOp) {
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I)
      return CastContextHint::None;

    if (I->getOpcode() == LdStOp)
      return CastContextHint::Normal;

    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == MaskedOp)
        return TTI::CastContextHint::Masked;
      if (II->getIntrinsicID() == GatScatOp)
        return TTI::CastContextHint::GatherScatter;
    }

    return TTI::CastContextHint::None;
  };

  switch (I->getOpcode()) {
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPExt:
    return getLoadStoreKind(I->getOperand(0), Instruction::Load,
                            Intrinsic::masked_load, Intrinsic::masked_gather);
  case Instruction::Trunc:
  case Instruction::FPTrunc:
    if (I->hasOneUse())
      return getLoadStoreKind(*I->user_begin(), Instruction::Store,
                              Intrinsic::masked_store,
                              Intrinsic::masked_scatter);
    break;
  default:
    return CastContextHint::None;
  }

  return TTI::CastContextHint::None;
}

InstructionCost TargetTransformInfo::getCastInstrCost(
    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtractWithExtendCost(
    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
  InstructionCost Cost =
      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCFInstrCost(
    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getCmpSelInstrCost(
    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost =
      TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getVectorInstrCost(unsigned Opcode,
                                                        Type *Val,
                                                        unsigned Index) const {
  InstructionCost Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, const Instruction *I) const {
  assert((I == nullptr || I->getOpcode() == Opcode) &&
         "Opcode should reflect passed instruction.");
  InstructionCost Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment,
                                                  AddressSpace, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
                                                        AddressSpace, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) const {
  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
      Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
      UseMaskForCond, UseMaskForGaps);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
                                      ArrayRef<Type *> Tys,
                                      TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return TTIImpl->getNumberOfParts(Tp);
}

InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
                                               const SCEV *Ptr) const {
  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getArithmeticReductionCost(
    unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost =
      TTIImpl->getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getMinMaxReductionCost(
    VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
    TTI::TargetCostKind CostKind) const {
  InstructionCost Cost = TTIImpl->getMinMaxReductionCost(
      Ty, CondTy, IsPairwiseForm, IsUnsigned, CostKind);
  assert(Cost >= 0 && "TTI should not produce negative costs!");
  return Cost;
}

InstructionCost TargetTransformInfo::getExtendedAddReductionCost(
    bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
    TTI::TargetCostKind CostKind) const {
  return TTIImpl->getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
                                              CostKind);
}

InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
  return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}

bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                             MemIntrinsicInfo &Info) const {
  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}

unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
  return TTIImpl->getAtomicMemIntrinsicMaxElementSize();
}

Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
    IntrinsicInst *Inst, Type *ExpectedType) const {
  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}

Type *TargetTransformInfo::getMemcpyLoopLoweringType(
    LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
    unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign) const {
  return TTIImpl->getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                            DestAddrSpace, SrcAlign, DestAlign);
}

void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
    SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
    unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
    unsigned SrcAlign, unsigned DestAlign) const {
  TTIImpl->getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                             SrcAddrSpace, DestAddrSpace,
                                             SrcAlign, DestAlign);
}

bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
                                              const Function *Callee) const {
  return TTIImpl->areInlineCompatible(Caller, Callee);
}

bool TargetTransformInfo::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  return TTIImpl->areFunctionArgsABICompatible(Caller, Callee, Args);
}

bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
                                             Type *Ty) const {
  return TTIImpl->isIndexedLoadLegal(Mode, Ty);
}

bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
                                              Type *Ty) const {
  return TTIImpl->isIndexedStoreLegal(Mode, Ty);
}

unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
  return TTIImpl->getLoadStoreVecRegBitWidth(AS);
}

bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
  return TTIImpl->isLegalToVectorizeLoad(LI);
}

bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
  return TTIImpl->isLegalToVectorizeStore(SI);
}

bool TargetTransformInfo::isLegalToVectorizeLoadChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                              AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeStoreChain(
    unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
  return TTIImpl->isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                               AddrSpace);
}

bool TargetTransformInfo::isLegalToVectorizeReduction(
    RecurrenceDescriptor RdxDesc, ElementCount VF) const {
  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
}

unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                  unsigned LoadSize,
                                                  unsigned ChainSizeInBytes,
                                                  VectorType *VecTy) const {
  return TTIImpl->getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
}

unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
                                                   unsigned StoreSize,
                                                   unsigned ChainSizeInBytes,
                                                   VectorType *VecTy) const {
  return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
}

bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                ReductionFlags Flags) const {
  return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
}

bool TargetTransformInfo::preferPredicatedReductionSelect(
    unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
  return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
}

TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
  return TTIImpl->getVPLegalizationStrategy(VPI);
}

bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
  return TTIImpl->shouldExpandReduction(II);
}

unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
  return TTIImpl->getGISelRematGlobalCost();
}

bool TargetTransformInfo::supportsScalableVectors() const {
  return TTIImpl->supportsScalableVectors();
}

bool TargetTransformInfo::hasActiveVectorLength() const {
  return TTIImpl->hasActiveVectorLength();
}

InstructionCost
TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
  return TTIImpl->getInstructionLatency(I);
}

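// Worked example for matchPairwiseShuffleMask (illustrative): at Level 1 on
// a 4-wide vector, the expected "left" mask selects the even elements,
// <0, 2, undef, undef>, and the "right" mask the odd elements,
// <1, 3, undef, undef>; unused lanes are encoded as -1 below.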
static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
                                     unsigned Level) {
  // We don't need a shuffle if we just want to have element 0 in position 0 of
  // the vector.
  if (!SI && Level == 0 && IsLeft)
    return true;
  else if (!SI)
    return false;

  SmallVector<int, 32> Mask(
      cast<FixedVectorType>(SI->getType())->getNumElements(), -1);

  // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
  // we look at the left or right side.
  for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
    Mask[i] = val;

  ArrayRef<int> ActualMask = SI->getShuffleMask();
  return Mask == ActualMask;
}

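// getReductionData recognizes a single reduction step. For example
// (illustrative IR), the select-based signed-max idiom
//
//   %c = icmp sgt <4 x i32> %a, %b
//   %s = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b
//
// is matched by m_SMax and reported as RK_MinMax carrying the compare's
// opcode, while a plain binary operator such as fadd is RK_Arithmetic.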
static Optional<TTI::ReductionData> getReductionData(Instruction *I) {
  Value *L, *R;
  if (m_BinOp(m_Value(L), m_Value(R)).match(I))
    return TTI::ReductionData(TTI::RK_Arithmetic, I->getOpcode(), L, R);
  if (auto *SI = dyn_cast<SelectInst>(I)) {
    if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
        m_SMax(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_OrdFMax(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return TTI::ReductionData(TTI::RK_MinMax, CI->getOpcode(), L, R);
    }
    if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
        m_UMax(m_Value(L), m_Value(R)).match(SI)) {
      auto *CI = cast<CmpInst>(SI->getCondition());
      return TTI::ReductionData(TTI::RK_UnsignedMinMax, CI->getOpcode(), L, R);
    }
  }
  return llvm::None;
}

static TTI::ReductionKind matchPairwiseReductionAtLevel(Instruction *I,
                                                        unsigned Level,
                                                        unsigned NumLevels) {
  // Match one level of pairwise operations.
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  if (!I)
    return TTI::RK_None;

  assert(I->getType()->isVectorTy() && "Expecting a vector type");

  Optional<TTI::ReductionData> RD = getReductionData(I);
  if (!RD)
    return TTI::RK_None;

  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(RD->LHS);
  if (!LS && Level)
    return TTI::RK_None;
  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(RD->RHS);
  if (!RS && Level)
    return TTI::RK_None;

  // On level 0 we can omit one shufflevector instruction.
  if (!Level && !RS && !LS)
    return TTI::RK_None;

  // Shuffle inputs must match.
  Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
  Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
  Value *NextLevelOp = nullptr;
  if (NextLevelOpR && NextLevelOpL) {
    // If we have two shuffles their operands must match.
    if (NextLevelOpL != NextLevelOpR)
      return TTI::RK_None;

    NextLevelOp = NextLevelOpL;
  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
    // On the first level we can omit the shufflevector <0, undef,...>. So the
    // input to the other shufflevector <1, undef> must match with one of the
    // inputs to the current binary operation.
    // Example:
    //  %NextLevelOpL = shufflevector %R, <1, undef ...>
    //  %BinOp        = fadd          %NextLevelOpL, %R
    if (NextLevelOpL && NextLevelOpL != RD->RHS)
      return TTI::RK_None;
    else if (NextLevelOpR && NextLevelOpR != RD->LHS)
      return TTI::RK_None;

    NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
  } else
    return TTI::RK_None;

  // Check that the next level's binary operation exists and matches with the
  // current one.
  if (Level + 1 != NumLevels) {
    if (!isa<Instruction>(NextLevelOp))
      return TTI::RK_None;
    Optional<TTI::ReductionData> NextLevelRD =
        getReductionData(cast<Instruction>(NextLevelOp));
    if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
      return TTI::RK_None;
  }

  // Shuffle mask for pairwise operation must match.
  if (matchPairwiseShuffleMask(LS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(RS, /*IsLeft=*/false, Level))
      return TTI::RK_None;
  } else if (matchPairwiseShuffleMask(RS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(LS, /*IsLeft=*/false, Level))
      return TTI::RK_None;
  } else {
    return TTI::RK_None;
  }

  if (++Level == NumLevels)
    return RD->Kind;

  // Match next level.
  return matchPairwiseReductionAtLevel(dyn_cast<Instruction>(NextLevelOp),
                                       Level, NumLevels);
}

TTI::ReductionKind TTI::matchPairwiseReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
  if (!EnableReduxCost)
    return TTI::RK_None;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return TTI::RK_None;

  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return TTI::RK_None;
  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
  if (!RD)
    return TTI::RK_None;

  auto *VecTy = cast<FixedVectorType>(RdxStart->getType());
  unsigned NumVecElems = VecTy->getNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return TTI::RK_None;

  // We look for a sequence of shuffle,shuffle,add triples like the following
  // that builds a pairwise reduction tree.
  //
  // (X0, X1, X2, X3)
  // (X0 + X1, X2 + X3, undef, undef)
  // ((X0 + X1) + (X2 + X3), undef, undef, undef)
  //
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  // %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  // %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
  // %r = extractelement <4 x float> %bin.rdx8, i32 0
  if (matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)) ==
      TTI::RK_None)
    return TTI::RK_None;

  Opcode = RD->Opcode;
  Ty = VecTy;

  return RD->Kind;
}

static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(Value *L, Value *R) {
  ShuffleVectorInst *S = nullptr;

  if ((S = dyn_cast<ShuffleVectorInst>(L)))
    return std::make_pair(R, S);

  S = dyn_cast<ShuffleVectorInst>(R);
  return std::make_pair(L, S);
}

TTI::ReductionKind TTI::matchVectorSplittingReduction(
    const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {

  if (!EnableReduxCost)
    return TTI::RK_None;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return TTI::RK_None;

  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return TTI::RK_None;
  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
  if (!RD)
    return TTI::RK_None;

  auto *VecTy = cast<FixedVectorType>(ReduxRoot->getOperand(0)->getType());
  unsigned NumVecElems = VecTy->getNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return TTI::RK_None;

  // We look for a sequence of shuffles and adds like the following, matching
  // one (fadd, shufflevector) pair at a time.
  //
  // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
  // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
  // %r = extractelement <4 x float> %bin.rdx8, i32 0

  unsigned MaskStart = 1;
  Instruction *RdxOp = RdxStart;
  SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
  unsigned NumVecElemsRemain = NumVecElems;
  while (NumVecElemsRemain - 1) {
    // Check for the right reduction operation.
    if (!RdxOp)
      return TTI::RK_None;
    Optional<TTI::ReductionData> RDLevel = getReductionData(RdxOp);
    if (!RDLevel || !RDLevel->hasSameData(*RD))
      return TTI::RK_None;

    Value *NextRdxOp;
    ShuffleVectorInst *Shuffle;
    std::tie(NextRdxOp, Shuffle) =
        getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);

    // Check that the current reduction operation and the shuffle use the same
    // value.
    if (Shuffle == nullptr)
      return TTI::RK_None;
    if (Shuffle->getOperand(0) != NextRdxOp)
      return TTI::RK_None;

    // Check that the shuffle masks match.
    for (unsigned j = 0; j != MaskStart; ++j)
      ShuffleMask[j] = MaskStart + j;
    // Fill the rest of the mask with -1 for undef.
    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);

    ArrayRef<int> Mask = Shuffle->getShuffleMask();
    if (ShuffleMask != Mask)
      return TTI::RK_None;

    RdxOp = dyn_cast<Instruction>(NextRdxOp);
    NumVecElemsRemain /= 2;
    MaskStart *= 2;
  }

  Opcode = RD->Opcode;
  Ty = VecTy;
  return RD->Kind;
}

TTI::ReductionKind
TTI::matchVectorReduction(const ExtractElementInst *Root, unsigned &Opcode,
                          VectorType *&Ty, bool &IsPairwise) {
  TTI::ReductionKind RdxKind = matchVectorSplittingReduction(Root, Opcode, Ty);
  if (RdxKind != TTI::ReductionKind::RK_None) {
    IsPairwise = false;
    return RdxKind;
  }
  IsPairwise = true;
  return matchPairwiseReduction(Root, Opcode, Ty);
}

InstructionCost
TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  switch (I->getOpcode()) {
  case Instruction::GetElementPtr:
  case Instruction::Ret:
  case Instruction::PHI:
  case Instruction::Br:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::FNeg:
  case Instruction::Select:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Store:
  case Instruction::Load:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
  case Instruction::Trunc:
  case Instruction::FPTrunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ExtractValue:
  case Instruction::ShuffleVector:
  case Instruction::Call:
  case Instruction::Switch:
    return getUserCost(I, CostKind);
  default:
    // We don't have any information on this instruction.
    return -1;
  }
}

TargetTransformInfo::Concept::~Concept() {}

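// Minimal usage sketch (assumed typical client code, not part of this file):
// with the new pass manager, a pass obtains TTI via
//
//   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
//
// while legacy passes go through the wrapper pass registered below:
//
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);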
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}

TargetIRAnalysis::TargetIRAnalysis(
    std::function<Result(const Function &)> TTICallback)
    : TTICallback(std::move(TTICallback)) {}

TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
                                               FunctionAnalysisManager &) {
  return TTICallback(F);
}

AnalysisKey TargetIRAnalysis::Key;

TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
  return Result(F.getParent()->getDataLayout());
}

// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
                "Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;

void TargetTransformInfoWrapperPass::anchor() {}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
    : ImmutablePass(ID) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
    TargetIRAnalysis TIRA)
    : ImmutablePass(ID), TIRA(std::move(TIRA)) {
  initializeTargetTransformInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
  FunctionAnalysisManager DummyFAM;
  TTI = TIRA.run(F, DummyFAM);
  return *TTI;
}

ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
  return new TargetTransformInfoWrapperPass(std::move(TIRA));
}