//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist(
    "disable-ppc-constant-hoisting",
    cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

// This seems like a reasonable default for the BG/Q (this pass is enabled, by
// default, only on the BG/Q).
static cl::opt<unsigned>
PrefDist("ppc-loop-prefetch-distance", cl::Hidden, cl::init(300),
         cl::desc("The loop prefetch distance"));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

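// POWER7 and later cores provide the popcntd/popcntw instructions, so
// hardware population count is fast for any integer width up to 64 bits.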
TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

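// Cost of materializing an integer immediate: a sign-extended 16-bit value
// fits the immediate field of a single li/addi; a 32-bit value with zeros in
// the low 16 bits fits a single lis; other 32-bit values need an lis+ori
// pair; anything wider needs a longer multi-instruction sequence.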
int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

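  // A full 64-bit constant can take up to five instructions to materialize
  // (e.g. lis, ori, shift, oris, ori); approximate that here as four times
  // the basic cost.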
  return 4 * TTI::TCC_Basic;
}

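// Immediates that fold directly into an intrinsic's final expansion are free:
// the 16-bit signed addend of the add/sub-with-overflow intrinsics, and the
// metadata operands of stackmaps and patchpoints.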
int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

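// Immediates that can be folded into the using instruction are free. The
// flags below track which PPC instruction forms make that possible:
// ShiftedFree for the shifted-immediate forms (addis/oris/xoris), RunFree for
// the rotate-and-mask instructions (rlwinm and friends), UnsignedFree for
// unsigned 16-bit compare immediates (cmplwi/cmpldi), and ZeroFree for
// comparisons and selects against zero.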
int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    // Fallthrough...
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    // Fallthrough...
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Fallthrough... (zero comparisons can use record-form instructions)
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

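    // A mask that is a contiguous run of ones (or the complement of one) can
    // be applied with a single rotate-and-mask instruction, so it is free.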
    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, UP);
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always interleave aggressively. For QPX unaligned loads, we
  // depend on combining the loads generated for consecutive accesses, and
  // failure to do so is particularly expensive. Aggressive interleaving makes
  // that combining much more likely than concatenation unrolling alone.
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

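// Interleaved accesses are profitable on PPC because the shuffles they
// require lower to cheap permutations (see getInterleavedMemoryOpCost below).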
bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

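// VSX extends the vector register file to 64 vector-scalar registers; the
// base architecture provides 32 registers in each class.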
unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

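// QPX registers hold four doubles (256 bits); Altivec/VSX registers are 128
// bits wide.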
unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() {
  // This is currently only used for the data prefetch pass which is only
  // enabled for BG/Q by default.
  return CacheLineSize;
}

unsigned PPCTTIImpl::getPrefetchDistance() { return PrefDist; }

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
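  // For example, a <8 x i32> shuffle on Altivec legalizes to two v4i32
  // registers, so LT.first (and hence the estimated cost) is 2.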
  return LT.first;
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay.  This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark.  It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
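  // Insertion is penalized further still: the vector must be stored, the
  // scalar merged in memory, and the whole vector reloaded.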
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store.  Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT || ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we should do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
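  // For example, a 16-byte vector with 4-byte alignment decomposes into four
  // accesses, adding LT.first * 3 here.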
  Cost += LT.first * (SrcBytes / Alignment - 1);

  // For a vector type, there is also scalarization overhead (stores only;
  // loads are expanded using the vector-load + permutation sequence, which
  // is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation itself.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector, except that the first shuffle can take two incoming vectors
  // because it has no partial result to merge into.
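  // For example, a factor-2 interleaved load of <8 x i32> legalizes to two
  // v4i32 registers (LT.first == 2), adding 2 * (2 - 1) = 2 shuffles.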
  Cost += Factor * (LT.first - 1);

  return Cost;
}