//===-- X86TargetTransformInfo.h - X86 specific TTI -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines a TargetTransformInfo::Concept conforming object specific
/// to the X86 target machine. It uses the target's detailed information to
/// provide more precise answers to certain TTI queries, while letting the
/// target independent and default TTI implementations handle the rest.
///
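/// A typical query path, shown only for illustration (FAM, F and VecTy are
/// assumed to be provided by the caller): a pass asks the generic
/// TargetTransformInfo facade, and the X86 backend routes the call into this
/// class.
/// \code
///   const TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
///   InstructionCost Cost = TTI.getArithmeticInstrCost(
///       Instruction::Add, VecTy, TargetTransformInfo::TCK_RecipThroughput);
/// \endcode
///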
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H
#define LLVM_LIB_TARGET_X86_X86TARGETTRANSFORMINFO_H

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include <optional>

namespace llvm {

class InstCombiner;

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;
  friend BaseT;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  const X86Subtarget *getST() const { return ST; }
  const X86TargetLowering *getTLI() const { return TLI; }

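  /// Subtarget features that are ignored when deciding whether inlining is
  /// allowed: areInlineCompatible() masks these bits out of the caller's and
  /// callee's feature sets before checking that the callee's features are a
  /// subset of the caller's. They cover tuning and codegen policy rather than
  /// ABI or instruction-set legality, so a mismatch does not make inlining
  /// unsafe.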
  const FeatureBitset InlineFeatureIgnoreList = {
      // This indicates the CPU is 64-bit capable, not that we are in 64-bit
      // mode.
      X86::FeatureX86_64,

      // These features don't have any intrinsics or ABI effect.
      X86::FeatureNOPL,
      X86::FeatureCX16,
      X86::FeatureLAHFSAHF64,

      // Some older targets can be set up to fold unaligned loads.
      X86::FeatureSSEUnalignedMem,

      // Codegen control options.
      X86::TuningFast11ByteNOP,
      X86::TuningFast15ByteNOP,
      X86::TuningFastBEXTR,
      X86::TuningFastHorizontalOps,
      X86::TuningFastLZCNT,
      X86::TuningFastScalarFSQRT,
      X86::TuningFastSHLDRotate,
      X86::TuningFastScalarShiftMasks,
      X86::TuningFastVectorShiftMasks,
      X86::TuningFastVariableCrossLaneShuffle,
      X86::TuningFastVariablePerLaneShuffle,
      X86::TuningFastVectorFSQRT,
      X86::TuningLEAForSP,
      X86::TuningLEAUsesAG,
      X86::TuningLZCNTFalseDeps,
      X86::TuningBranchFusion,
      X86::TuningMacroFusion,
      X86::TuningPadShortFunctions,
      X86::TuningPOPCNTFalseDeps,
      X86::TuningMULCFalseDeps,
      X86::TuningPERMFalseDeps,
      X86::TuningRANGEFalseDeps,
      X86::TuningGETMANTFalseDeps,
      X86::TuningMULLQFalseDeps,
      X86::TuningSlow3OpsLEA,
      X86::TuningSlowDivide32,
      X86::TuningSlowDivide64,
      X86::TuningSlowIncDec,
      X86::TuningSlowLEA,
      X86::TuningSlowPMADDWD,
      X86::TuningSlowPMULLD,
      X86::TuningSlowSHLD,
      X86::TuningSlowTwoMemOps,
      X86::TuningSlowUAMem16,
      X86::TuningPreferMaskRegisters,
      X86::TuningInsertVZEROUPPER,
      X86::TuningUseSLMArithCosts,
      X86::TuningUseGLMDivSqrtCosts,
      X86::TuningNoDomainDelay,
      X86::TuningNoDomainDelayMov,
      X86::TuningNoDomainDelayShuffle,
      X86::TuningNoDomainDelayBlend,
      X86::TuningPreferShiftShuffle,
      X86::TuningFastImmVectorShift,
      X86::TuningFastDPWSSD,

      // Perf-tuning flags.
      X86::TuningFastGather,
      X86::TuningSlowUAMem32,
      X86::TuningAllowLight256Bit,

      // Based on whether the user set the -mprefer-vector-width command-line
      // option.
      X86::TuningPrefer128Bit,
      X86::TuningPrefer256Bit,

      // CPU name enums. These just follow the CPU string.
      X86::ProcIntelAtom
  };

public:
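  /// Construct a per-function X86 TTI implementation. The X86Subtarget is
  /// derived from F's target attributes (e.g. "target-cpu"/"target-features"),
  /// so two functions in the same module can receive different cost-model
  /// answers; X86TargetMachine::getTargetTransformInfo builds one of these
  /// for each function it is queried about.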
  explicit X86TTIImpl(const X86TargetMachine *TM, const Function &F)
      : BaseT(TM, F.getDataLayout()), ST(TM->getSubtargetImpl(F)),
        TLI(ST->getTargetLowering()) {}

  /// \name Scalar TTI Implementations
  /// @{
  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Cache TTI Implementation
  /// @{
  std::optional<unsigned> getCacheSize(
    TargetTransformInfo::CacheLevel Level) const override;
  std::optional<unsigned> getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const override;
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(unsigned ClassID) const;
  bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const;
  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
  unsigned getLoadStoreVecRegBitWidth(unsigned AS) const;
  unsigned getMaxInterleaveFactor(ElementCount VF);
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr);
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const;

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = {},
                                 const Instruction *CxtI = nullptr);
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr);
  InstructionCost getCmpSelInstrCost(
      unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
      TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Op1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Op2Info = {TTI::OK_AnyValue, TTI::OP_None},
      const Instruction *I = nullptr);
  using BaseT::getVectorInstrCost;
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1);
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind,
                                           ArrayRef<Value *> VL = {});
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr);
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind);
  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I);
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind);
  InstructionCost getAddressComputationCost(Type *PtrTy, ScalarEvolution *SE,
                                            const SCEV *Ptr);

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;

  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind);

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind);

  InstructionCost getMinMaxCost(Intrinsic::ID IID, Type *Ty,
                                TTI::TargetCostKind CostKind,
                                FastMathFlags FMF);

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind);

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false);
  InstructionCost getInterleavedMemoryOpCostAVX512(
      unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
      ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind, bool UseMaskForCond = false,
      bool UseMaskForGaps = false);

  InstructionCost getIntImmCost(int64_t);

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind);

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr);

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr);
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind);
  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
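  /// Here "AM" is the usual decomposed addressing mode
  /// BaseGV + BaseOffset + BaseReg + Scale * IndexReg, passed via the
  /// individual parameters below rather than as a single AddrMode argument.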
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) const;

  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2);
  bool canMacroFuseCmp();
  bool isLegalMaskedLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedStore(Type *DataType, Align Alignment);
  bool isLegalNTLoad(Type *DataType, Align Alignment);
  bool isLegalNTStore(Type *DataType, Align Alignment);
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;
  bool forceScalarizeMaskedGather(VectorType *VTy, Align Alignment);
  bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) {
    return forceScalarizeMaskedGather(VTy, Alignment);
  }
  bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedGather(Type *DataType, Align Alignment);
  bool isLegalMaskedScatter(Type *DataType, Align Alignment);
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment);
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment);
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;
  bool hasDivRemOp(Type *DataType, bool IsSigned);
  bool isExpensiveToSpeculativelyExecute(const Instruction *I);
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty);
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const;

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return ST->getMaxInlineSizeThreshold();
  }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const;
  bool prefersVectorizedAddressing() const;
  bool supportsEfficientVectorElementLoadStore() const;
  bool enableInterleavedAccessVectorization();

  InstructionCost getBranchMispredictPenalty() const;

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const;

  bool isVectorShiftByScalarCheap(Type *Ty) const;

  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const;

private:
  bool supportsGather() const;
  InstructionCost getGSVectorCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                  Type *DataTy, const Value *Ptr,
                                  Align Alignment, unsigned AddressSpace);

  int getGatherOverhead() const;
  int getScatterOverhead() const;

  /// @}
};

} // end namespace llvm

#endif