//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss          rsqrtss
///   AMD K7            11-16     19              3
///   Piledriver        9-24      13-15           5
///   Jaguar            14        16              2
///   Pentium II,III    18        30              2
///   Nehalem           7-14      7-18            3
///   Haswell           10-13     11              5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as
/// throughput, code size, latency and uop count.
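///
/// The latency numbers above feed the per-feature cost tables below; e.g. the
/// SSE42CostTable charges ISD::FDIV 14 for f32 and 22 for f64, matching the
/// worst-case Nehalem divss/divsd latencies from agner.org.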
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
  TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
  TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

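// The counts below reflect the ISA register files: e.g. for the vector class
// (ClassID == 1), x86-64 with AVX-512 exposes 32 registers (XMM/YMM/ZMM0-31),
// other 64-bit configurations 16, and 32-bit mode 8.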
unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}
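
// Note that the subtarget's preferred vector width caps the fixed-width
// vector answer above: e.g. an AVX-512 subtarget configured to prefer
// 256-bit vectors (e.g. via -mprefer-vector-width=256) reports 256 here
// despite 512-bit ZMM support.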

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // vXi8 multiplications are always promoted to vXi16.
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
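
  // LT.first is the split factor from legalization and LT.second the legal
  // MVT: e.g. a v8i32 mul on an SSE2-only target legalizes to two v4i32 ops,
  // so a table hit for MVT::v4i32 below is scaled as LT.first * Entry->Cost.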

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
    { ISD::FMUL,  MVT::f64,   2  }, // mulsd
    { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64, 2  }, // addpd
    { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq throughput is 2 and addq throughput is 4,
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    //       2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64, 4  },
    { ISD::SUB,   MVT::v2i64, 4  },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
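
    // E.g. when both v4i32 operands are known to fit in 8 unsigned bits, the
    // multiply can be done on zero-extended i16 lanes with pmullw, hence the
    // cost of 3 above rather than the SLMCostTable's pmulld cost of 11.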

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
      InstructionCost Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    else // UREM
      return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw

    { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw

    { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL,  MVT::v8i32,  1 }, // pslld
    { ISD::SRL,  MVT::v8i32,  1 }, // psrld
    { ISD::SRA,  MVT::v8i32,  1 }, // psrad
    { ISD::SHL,  MVT::v4i64,  1 }, // psllq
    { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v8i32,      1 },
    { ISD::SRL,     MVT::v8i32,      1 },
    { ISD::SRA,     MVT::v8i32,      1 },
    { ISD::SHL,     MVT::v16i32,     1 },
    { ISD::SRL,     MVT::v16i32,     1 },
    { ISD::SRA,     MVT::v16i32,     1 },

    { ISD::SHL,     MVT::v4i64,      1 },
    { ISD::SRL,     MVT::v4i64,      1 },
    { ISD::SHL,     MVT::v8i64,      1 },
    { ISD::SRL,     MVT::v8i64,      1 },

    { ISD::SRA,     MVT::v2i64,      1 },
    { ISD::SRA,     MVT::v4i64,      1 },
    { ISD::SRA,     MVT::v8i64,      1 },

    { ISD::MUL,     MVT::v16i32,     1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,      1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,      1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/

    { ISD::FNEG,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal, even though we declare them
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    1 },
    { ISD::SRA,     MVT::v4i32,    1 },
    { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    1 },
    { ISD::SHL,     MVT::v4i64,    2 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,     MVT::v4i64,    2 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,     MVT::v16i8,    1 },
    { ISD::SRL,     MVT::v16i8,    2 },
    { ISD::SRA,     MVT::v16i8,    2 },
    { ISD::SHL,     MVT::v8i16,    1 },
    { ISD::SRL,     MVT::v8i16,    2 },
    { ISD::SRA,     MVT::v8i16,    2 },
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    2 },
    { ISD::SRA,     MVT::v4i32,    2 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    2 },
    { ISD::SRA,     MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered
    // into a vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,     22 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16,    20 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,     22 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16,    20 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,     48 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16,    20 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,      1 }, // psubb
    { ISD::ADD,  MVT::v32i8,      1 }, // paddb
    { ISD::SUB,  MVT::v16i16,     1 }, // psubw
    { ISD::ADD,  MVT::v16i16,     1 }, // paddw
    { ISD::SUB,  MVT::v8i32,      1 }, // psubd
    { ISD::ADD,  MVT::v8i32,      1 }, // paddd
    { ISD::SUB,  MVT::v4i64,      1 }, // psubq
    { ISD::ADD,  MVT::v4i64,      1 }, // paddq

    { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,      2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,        1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,      1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,     4 },
    { ISD::MUL,     MVT::v8i32,      5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,     MVT::v4i64,     12 },

    { ISD::SUB,     MVT::v32i8,      4 },
    { ISD::ADD,     MVT::v32i8,      4 },
    { ISD::SUB,     MVT::v16i16,     4 },
    { ISD::ADD,     MVT::v16i16,     4 },
    { ISD::SUB,     MVT::v8i32,      4 },
    { ISD::ADD,     MVT::v8i32,      4 },
    { ISD::SUB,     MVT::v4i64,      4 },
    { ISD::ADD,     MVT::v4i64,      4 },

    { ISD::FNEG,    MVT::v4f64,      2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG,    MVT::v8f32,      2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL,    MVT::f64,        2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL,    MVT::v2f64,      2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL,    MVT::v4f64,      4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV,    MVT::f32,       14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,     14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,     28 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,       22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,     22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,     44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV,  MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV,  MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV,  MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV,  MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,   MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,         1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,         1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,       1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,       1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill general-purpose
  // registers. The overhead of division is going to dominate most kernels
  // anyway, so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }
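
  // E.g. a v4i32 sdiv by a non-constant divisor on SSE2 is charged
  // 20 * 1 * 4 * ScalarCost here, deliberately steering the vectorizers away
  // from vector division.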

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

getShuffleCost(TTI::ShuffleKind Kind,VectorType * BaseTp,ArrayRef<int> Mask,int Index,VectorType * SubTp)1009 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1010                                            VectorType *BaseTp,
1011                                            ArrayRef<int> Mask, int Index,
1012                                            VectorType *SubTp) {
1013   // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1014   // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1015   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1016 
1017   Kind = improveShuffleKindFromMask(Kind, Mask);
1018   // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1019   if (Kind == TTI::SK_Transpose)
1020     Kind = TTI::SK_PermuteTwoSrc;
1021 
1022   // For Broadcasts we are splatting the first element from the first input
1023   // register, so only need to reference that input and all the output
1024   // registers are the same.
1025   if (Kind == TTI::SK_Broadcast)
1026     LT.first = 1;
1027 
1028   // Subvector extractions are free if they start at the beginning of a
1029   // vector and cheap if the subvectors are aligned.
1030   if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1031     int NumElts = LT.second.getVectorNumElements();
1032     if ((Index % NumElts) == 0)
1033       return 0;
1034     std::pair<InstructionCost, MVT> SubLT =
1035         TLI->getTypeLegalizationCost(DL, SubTp);
1036     if (SubLT.second.isVector()) {
1037       int NumSubElts = SubLT.second.getVectorNumElements();
1038       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1039         return SubLT.first;
1040       // Handle some cases for widening legalization. For now we only handle
1041       // cases where the original subvector was naturally aligned and evenly
1042       // fit in its legalized subvector type.
1043       // FIXME: Remove some of the alignment restrictions.
1044       // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1045       // vectors.
1046       int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1047       if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1048           (NumSubElts % OrigSubElts) == 0 &&
1049           LT.second.getVectorElementType() ==
1050               SubLT.second.getVectorElementType() &&
1051           LT.second.getVectorElementType().getSizeInBits() ==
1052               BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1053         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1054                "Unexpected number of elements!");
1055         auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1056                                            LT.second.getVectorNumElements());
1057         auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1058                                            SubLT.second.getVectorNumElements());
1059         int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1060         InstructionCost ExtractCost = getShuffleCost(
1061             TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1062 
1063         // If the original size is 32-bits or more, we can use pshufd. Otherwise
1064         // if we have SSSE3 we can use pshufb.
1065         if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1066           return ExtractCost + 1; // pshufd or pshufb
1067 
1068         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1069                "Unexpected vector size");
1070 
1071         return ExtractCost + 2; // worst case pshufhw + pshufd
1072       }
1073     }
1074   }
1075 
1076   // Subvector insertions are cheap if the subvectors are aligned.
1077   // Note that in general, the insertion starting at the beginning of a vector
1078   // isn't free, because we need to preserve the rest of the wide vector.
1079   if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1080     int NumElts = LT.second.getVectorNumElements();
1081     std::pair<InstructionCost, MVT> SubLT =
1082         TLI->getTypeLegalizationCost(DL, SubTp);
1083     if (SubLT.second.isVector()) {
1084       int NumSubElts = SubLT.second.getVectorNumElements();
1085       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1086         return SubLT.first;
1087     }
1088   }
1089 
1090   // Handle some common (illegal) sub-vector types as they are often very cheap
1091   // to shuffle even on targets without PSHUFB.
1092   EVT VT = TLI->getValueType(DL, BaseTp);
1093   if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1094       !ST->hasSSSE3()) {
1095      static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1096       {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
1097       {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
1098       {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
1099       {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
1100       {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck
1101 
1102       {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
1103       {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
1104       {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
1105       {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck
1106 
1107       {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
1108       {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
1109       {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
1110       {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
1111       {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck
1112 
1113       {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1114       {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1115       {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
1116       {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
1117       {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
1118     };
1119 
1120     if (ST->hasSSE2())
1121       if (const auto *Entry =
1122               CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1123         return Entry->Cost;
1124   }
1125 
1126   // We are going to permute multiple sources and the result will be in multiple
1127   // destinations. Providing an accurate cost only for splits where the element
1128   // type remains the same.
1129   if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1130     MVT LegalVT = LT.second;
1131     if (LegalVT.isVector() &&
1132         LegalVT.getVectorElementType().getSizeInBits() ==
1133             BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1134         LegalVT.getVectorNumElements() <
1135             cast<FixedVectorType>(BaseTp)->getNumElements()) {
1136 
1137       unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1138       unsigned LegalVTSize = LegalVT.getStoreSize();
1139       // Number of source vectors after legalization:
1140       unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1141       // Number of destination vectors after legalization:
1142       InstructionCost NumOfDests = LT.first;
1143 
1144       auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1145                                               LegalVT.getVectorNumElements());
1146 
1147       InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1148       return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1149                                             None, 0, nullptr);
1150     }
1151 
1152     return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1153   }
1154 
1155   // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1156   if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1157     // We assume that source and destination have the same vector type.
1158     InstructionCost NumOfDests = LT.first;
1159     InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1160     LT.first = NumOfDests * NumOfShufflesPerDest;
1161   }
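       // E.g. for a two-source <32 x i8> shuffle on an SSE2-only target,
       // LT.first starts at 2, so the multiplier becomes 2 * (2 * 2 - 1) = 6;
       // the SSE2ShuffleTbl cost of 13 for v16i8 then gives 6 * 13 = 78.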
1162 
1163   static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1164       {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1165       {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1166 
1167       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1168       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1169 
1170       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1171       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1172       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
1173   };
1174 
1175   if (ST->hasVBMI())
1176     if (const auto *Entry =
1177             CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1178       return LT.first * Entry->Cost;
1179 
1180   static const CostTblEntry AVX512BWShuffleTbl[] = {
1181       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1182       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1183 
1184       {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1185       {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1186       {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2
1187 
1188       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1189       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1190       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
1191 
1192       {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1193       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1194       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
1195       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1196 
1197       {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1198       {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
1199   };
1200 
1201   if (ST->hasBWI())
1202     if (const auto *Entry =
1203             CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1204       return LT.first * Entry->Cost;
1205 
1206   static const CostTblEntry AVX512ShuffleTbl[] = {
1207       {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
1208       {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1209       {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
1210       {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1211       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1212       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1213 
1214       {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
1215       {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1216       {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
1217       {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1218       {TTI::SK_Reverse, MVT::v32i16, 7}, // per llvm-mca
1219       {TTI::SK_Reverse, MVT::v64i8,  7}, // per llvm-mca
1220 
1221       {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
1222       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1223       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
1224       {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1225       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1226       {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
1227       {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
1228       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1229       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
1230       {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1231       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1232       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
1233       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb
1234 
1235       {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
1236       {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1237       {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
1238       {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1239       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
1240       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
1241       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
1242       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
1243       {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
1244       {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
1245       {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
1246       {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d
1247 
1248       // FIXME: This just applies the type legalization cost rules above
1249       // assuming these completely split.
1250       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1251       {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
1252       {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
1253       {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
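           // (Derivation, assuming a full split into two 256-bit halves:
           //  14 = 2 destinations * 7, the AVX2 two-source cost for
           //  v16i16/v32i8 below, and 42 = 2 * (2 * 2 - 1) * 7, matching
           //  the generic splitting logic earlier in this function.)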
1254 
1255       {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1256       {TTI::SK_Select, MVT::v64i8,  1}, // vpternlogq
1257       {TTI::SK_Select, MVT::v8f64,  1}, // vblendmpd
1258       {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1259       {TTI::SK_Select, MVT::v8i64,  1}, // vblendmq
1260       {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1261   };
1262 
1263   if (ST->hasAVX512())
1264     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1265       return LT.first * Entry->Cost;
1266 
1267   static const CostTblEntry AVX2ShuffleTbl[] = {
1268       {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
1269       {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
1270       {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
1271       {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
1272       {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1273       {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb
1274 
1275       {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
1276       {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
1277       {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
1278       {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
1279       {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1280       {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb
1281 
1282       {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1283       {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb
1284 
1285       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1286       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1287       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1288       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1289       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1290                                                   // + vpblendvb
1291       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
1292                                                   // + vpblendvb
1293 
1294       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
1295       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
1296       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
1297       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
1298       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1299                                                // + vpblendvb
1300       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
1301                                                // + vpblendvb
1302   };
1303 
1304   if (ST->hasAVX2())
1305     if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1306       return LT.first * Entry->Cost;
1307 
1308   static const CostTblEntry XOPShuffleTbl[] = {
1309       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
1310       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
1311       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
1312       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
1313       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1314                                                   // + vinsertf128
1315       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm
1316                                                   // + vinsertf128
1317 
1318       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1319                                                // + vinsertf128
1320       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
1321       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
1322                                                // + vinsertf128
1323       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
1324   };
1325 
1326   if (ST->hasXOP())
1327     if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1328       return LT.first * Entry->Cost;
1329 
1330   static const CostTblEntry AVX1ShuffleTbl[] = {
1331       {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1332       {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1333       {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1334       {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1335       {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1336       {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128
1337 
1338       {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1339       {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1340       {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1341       {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1342       {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1343                                          // + vinsertf128
1344       {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
1345                                          // + vinsertf128
1346 
1347       {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
1348       {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
1349       {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
1350       {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
1351       {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1352       {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor
1353 
1354       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
1355       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
1356       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
1357       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
1358       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1359                                                   // + 2*por + vinsertf128
1360       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
1361                                                   // + 2*por + vinsertf128
1362 
1363       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
1364       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
1365       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
1366       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
1367       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1368                                                 // + 4*por + vinsertf128
1369       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
1370                                                 // + 4*por + vinsertf128
1371   };
1372 
1373   if (ST->hasAVX())
1374     if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1375       return LT.first * Entry->Cost;
1376 
1377   static const CostTblEntry SSE41ShuffleTbl[] = {
1378       {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1379       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1380       {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1381       {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1382       {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1383       {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
1384   };
1385 
1386   if (ST->hasSSE41())
1387     if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1388       return LT.first * Entry->Cost;
1389 
1390   static const CostTblEntry SSSE3ShuffleTbl[] = {
1391       {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1392       {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1393 
1394       {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1395       {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1396 
1397       {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1398       {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1399 
1400       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1401       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1402 
1403       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1404       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1405   };
1406 
1407   if (ST->hasSSSE3())
1408     if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1409       return LT.first * Entry->Cost;
1410 
1411   static const CostTblEntry SSE2ShuffleTbl[] = {
1412       {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1413       {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1414       {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1415       {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1416       {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1417 
1418       {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1419       {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1420       {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1421       {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1422       {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1423                                         // + 2*pshufd + 2*unpck + packus
1424 
1425       {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1426       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1427       {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1428       {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1429       {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1430 
1431       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1432       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1433       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1434       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1435                                                   // + pshufd/unpck
1436       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
1437                                                   // + 2*pshufd + 2*unpck + 2*packus
1438 
1439       {TTI::SK_PermuteTwoSrc,    MVT::v2f64, 1},  // shufpd
1440       {TTI::SK_PermuteTwoSrc,    MVT::v2i64, 1},  // shufpd
1441       {TTI::SK_PermuteTwoSrc,    MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
1442       {TTI::SK_PermuteTwoSrc,    MVT::v8i16, 8},  // blend+permute
1443       {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13}, // blend+permute
1444   };
1445 
1446   if (ST->hasSSE2())
1447     if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1448       return LT.first * Entry->Cost;
1449 
1450   static const CostTblEntry SSE1ShuffleTbl[] = {
1451     { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
1452     { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
1453     { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
1454     { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1455     { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
1456   };
1457 
1458   if (ST->hasSSE1())
1459     if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1460       return LT.first * Entry->Cost;
1461 
1462   return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1463 }
1464 
1465 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1466                                              Type *Src,
1467                                              TTI::CastContextHint CCH,
1468                                              TTI::TargetCostKind CostKind,
1469                                              const Instruction *I) {
1470   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1471   assert(ISD && "Invalid opcode");
1472 
1473   // TODO: Allow non-throughput costs that aren't binary.
1474   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1475     if (CostKind != TTI::TCK_RecipThroughput)
1476       return Cost == 0 ? 0 : 1;
1477     return Cost;
1478   };
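       // E.g. a TTI::TCK_CodeSize or TTI::TCK_Latency query collapses every
       // non-zero table cost below to 1, while free (zero-cost) conversions
       // stay free.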
1479 
1480   // FIXME: Need a better design of the cost table to handle non-simple types of
1481   // potential massive combinations (elem_num x src_type x dst_type).
1482 
1483   static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1484     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1485     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1486 
1487     // Mask sign extend has an instruction.
1488     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1489     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1490     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1491     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1492     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1493     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1494     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1495     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1496     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1497     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1498     { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },
1499 
1500     // Mask zero extend is a sext + shift.
1501     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1502     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1503     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1504     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1505     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1506     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1507     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1508     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1509     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1510     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1511     { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
1512 
1513     { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
1514     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
1515     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // widen to zmm
1516     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // widen to zmm
1517     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // widen to zmm
1518     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // widen to zmm
1519     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // widen to zmm
1520     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // widen to zmm
1521     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // widen to zmm
1522     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // widen to zmm
1523     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // widen to zmm
1524     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
1525     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
1526   };
1527 
1528   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1529     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1530     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1531 
1532     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1533     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1534 
1535     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
1536     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },
1537 
1538     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
1539     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
1540   };
1541 
1542   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1543   // 256-bit wide vectors.
1544 
1545   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1546     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
1547     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
1548     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
1549 
1550     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1551     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1552     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1553     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  3 }, // sext+vpslld+vptestmd
1554     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1555     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1556     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1557     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1558     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // zmm vpslld+vptestmd
1559     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // zmm vpslld+vptestmd
1560     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // zmm vpslld+vptestmd
1561     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i32, 2 }, // vpslld+vptestmd
1562     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
1563     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
1564     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i64,  2 }, // vpsllq+vptestmq
1565     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 2 },
1566     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 2 },
1567     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i64,  2 },
1568     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  2 },
1569     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },
1570     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // zmm vpmovqd
1571     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb
1572 
1573     { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16,  3 }, // extend to v16i32
1574     { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16,  8 },
1575 
1576     // Sign extend is zmm vpternlogd+vptruncdb.
1577     // Zero extend is zmm broadcast load+vptruncdw.
1578     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   3 },
1579     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   4 },
1580     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   3 },
1581     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   4 },
1582     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   3 },
1583     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   4 },
1584     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  3 },
1585     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  4 },
1586 
1587     // Sign extend is zmm vpternlogd+vptruncdw.
1588     // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1589     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   3 },
1590     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1591     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   3 },
1592     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1593     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   3 },
1594     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1595     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  3 },
1596     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
1597 
1598     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // zmm vpternlogd
1599     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // zmm vpternlogd+psrld
1600     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // zmm vpternlogd
1601     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // zmm vpternlogd+psrld
1602     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // zmm vpternlogd
1603     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // zmm vpternlogd+psrld
1604     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // zmm vpternlogq
1605     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // zmm vpternlogq+psrlq
1606     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // zmm vpternlogq
1607     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // zmm vpternlogq+psrlq
1608 
1609     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 }, // vpternlogd
1610     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 }, // vpternlogd+psrld
1611     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 }, // vpternlogq
1612     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 }, // vpternlogq+psrlq
1613 
1614     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1615     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1616     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1617     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1618     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1619     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1620     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1621     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1622     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1623     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1624 
1625     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1626     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1627 
1628     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1629     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1630     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
1631     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
1632     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1633     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
1634     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1635     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1636 
1637     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1638     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1639     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
1640     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
1641     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1642     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
1643     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1644     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1645     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
1646     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },
1647 
1648     { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f64,  3 },
1649     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1650     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 3 },
1651     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 3 },
1652 
1653     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1654     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
1655     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
1656     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
1657     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
1658     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
1659   };
1660 
1661   static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1662     // Mask sign extend has an instruction.
1663     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1664     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1665     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1666     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1667     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1668     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1669     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1670     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1671     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1672 
1673     // Mask zero extend is a sext + shift.
1674     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1675     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1676     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1677     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1678     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1679     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1680     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1681     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1682     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1683 
1684     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
1685     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // vpsllw+vptestmb
1686     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // vpsllw+vptestmw
1687     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // vpsllw+vptestmb
1688     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // vpsllw+vptestmw
1689     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // vpsllw+vptestmb
1690     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // vpsllw+vptestmw
1691     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // vpsllw+vptestmb
1692     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // vpsllw+vptestmw
1693     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // vpsllw+vptestmb
1694   };
1695 
1696   static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1697     { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1698     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1699     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1700     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1701 
1702     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1703     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1704     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1705     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1706 
1707     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
1708     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
1709     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
1710     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
1711 
1712     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
1713     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
1714     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
1715     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
1716   };
1717 
1718   static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1719     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1720     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1721     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1722     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  8 }, // split+2*v8i8
1723     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1724     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1725     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1726     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 8 }, // split+2*v8i16
1727     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // vpslld+vptestmd
1728     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // vpslld+vptestmd
1729     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // vpslld+vptestmd
1730     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // vpsllq+vptestmq
1731     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // vpsllq+vptestmq
1732     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // vpmovqd
1733     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i64,  2 }, // vpmovqb
1734     { ISD::TRUNCATE,  MVT::v4i16,   MVT::v4i64,  2 }, // vpmovqw
1735     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i32,  2 }, // vpmovwb
1736 
1737     // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1738     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1739     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   5 },
1740     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   6 },
1741     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   5 },
1742     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   6 },
1743     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   5 },
1744     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   6 },
1745     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 10 },
1746     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 12 },
1747 
1748     // sign extend is vpcmpeq+maskedmove+vpmovdw
1749     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1750     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1751     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   5 },
1752     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1753     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   5 },
1754     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1755     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   5 },
1756     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1757     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1758 
1759     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // vpternlogd
1760     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // vpternlogd+psrld
1761     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // vpternlogd
1762     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // vpternlogd+psrld
1763     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // vpternlogd
1764     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // vpternlogd+psrld
1765     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // vpternlogq
1766     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // vpternlogq+psrlq
1767     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // vpternlogq
1768     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // vpternlogq+psrlq
1769 
1770     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
1771     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
1772     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
1773     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
1774     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
1775     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
1776     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
1777     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
1778     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
1779     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
1780     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
1781     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
1782     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
1783     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
1784 
1785     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
1786     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
1787 
1788     { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  3 },
1789     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f32,  3 },
1790 
1791     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    1 },
1792     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    1 },
1793 
1794     { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
1795     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
1796     { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f64,  1 },
1797     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
1798     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
1799   };
1800 
1801   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1802     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1803     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1804     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1805     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1806     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
1807     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
1808     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
1809     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
1810     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1811     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1812     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1813     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1814     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
1815     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
1816     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1817     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1818     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1819     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1820     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1821     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1822 
1823     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
1824     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1825     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
1826 
1827     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
1828     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
1829 
1830     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  5 },
1831   };
1832 
1833   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1834     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
1835     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
1836     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
1837     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
1838     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
1839     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
1840     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
1841     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
1842     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1843     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1844     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1845     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1846     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 4 },
1847     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1848     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
1849     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
1850     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
1851     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
1852 
1853     { ISD::TRUNCATE,    MVT::v4i1,  MVT::v4i64,  4 },
1854     { ISD::TRUNCATE,    MVT::v8i1,  MVT::v8i32,  5 },
1855     { ISD::TRUNCATE,    MVT::v16i1, MVT::v16i16, 4 },
1856     { ISD::TRUNCATE,    MVT::v8i1,  MVT::v8i64,  9 },
1857     { ISD::TRUNCATE,    MVT::v16i1, MVT::v16i64, 11 },
1858 
1859     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
1860     { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
1861     { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
1862     { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
1863     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
1864     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  2 },
1865     { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i64, 11 },
1866     { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i64,  9 },
1867     { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  3 },
1868     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i64, 11 },
1869 
1870     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
1871     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
1872     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
1873     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
1874     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
1875     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
1876     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
1877     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
1878     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
1879     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
1880     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },
1881     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
1882 
1883     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
1884     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
1885     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
1886     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
1887     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
1888     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
1889     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
1890     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
1891     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
1892     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 6 },
1893     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
1894     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
1895     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 8 },
1896     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 5 },
1897     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 6 },
1898     // The generic code to compute the scalar overhead is currently broken.
1899     // Work around this limitation by estimating the scalarization overhead
1900     // here: we have roughly 10 instructions per scalar element, multiplied
1901     // by the vector width.
1902     // FIXME: remove this when PR19268 is fixed.
1903     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i64, 13 },
1905 
1906     { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 4 },
1907     { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f64, 3 },
1908     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f64, 2 },
1909     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 3 },
1910 
1911     { ISD::FP_TO_UINT,  MVT::v4i8,  MVT::v4f64, 3 },
1912     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f64, 2 },
1913     { ISD::FP_TO_UINT,  MVT::v8i8,  MVT::v8f32, 4 },
1914     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 3 },
1915     { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 9 },
1916     // This node is expanded into scalarized operations, but BasicTTI is
1917     // overly optimistic in estimating its cost: it computes 3 per element
1918     // (one vector-extract, one scalar conversion and one vector-insert).
1919     // The problem is that the inserts form a read-modify-write chain, so
1920     // latency should be factored in too. Inflate the cost per element by 1.
1921     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
1922 
1923     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
1924     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
1925   };
1926 
1927   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1928     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
1929     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
1930     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
1931     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
1932     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
1933     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
1934 
1935     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
1936     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
1937     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
1938     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
1939     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
1940     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
1941     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
1942     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
1943     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1944     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1945     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
1946     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
1947     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
1948     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
1949     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1950     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1951     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1952     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1953 
1954     // These truncates end up widening elements.
1955     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVZXBQ
1956     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVZXWQ
1957     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVZXBD
1958 
1959     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  1 },
1960     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  1 },
1961     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
1962     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
1963     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
1964     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
1965     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
1966     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
1967     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i64,  1 }, // PSHUFB
1968 
1969     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
1970     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
1971 
1972     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f32,  3 },
1973     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f64,  3 },
1974 
1975     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f32,  3 },
1976     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f64,  3 },
1977     { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  2 },
1978   };
1979 
1980   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
1981     // These are somewhat magic numbers, justified by looking at the output
1982     // of Intel's IACA, running some kernels, and making sure that when we
1983     // take legalization into account the throughput will be overestimated.
1984     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1985     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1986     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1987     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1988     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1989     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
1990     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
1991     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1992     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1993 
1994     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1995     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1996     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1997     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1998     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1999     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2000     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
2001     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
2002 
2003     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f32,  4 },
2004     { ISD::FP_TO_SINT,  MVT::v2i16,  MVT::v2f32,  2 },
2005     { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  3 },
2006     { ISD::FP_TO_SINT,  MVT::v4i16,  MVT::v4f32,  2 },
2007     { ISD::FP_TO_SINT,  MVT::v2i16,  MVT::v2f64,  2 },
2008     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f64,  4 },
2009 
2010     { ISD::FP_TO_SINT,  MVT::v2i32,  MVT::v2f64,  1 },
2011 
2012     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    6 },
2013     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    6 },
2014 
2015     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2016     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    4 },
2017     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f32,  4 },
2018     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f64,  4 },
2019     { ISD::FP_TO_UINT,  MVT::v4i8,   MVT::v4f32,  3 },
2020     { ISD::FP_TO_UINT,  MVT::v2i16,  MVT::v2f32,  2 },
2021     { ISD::FP_TO_UINT,  MVT::v2i16,  MVT::v2f64,  2 },
2022     { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  4 },
2023     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  8 },
2024 
2025     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
2026     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
2027     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
2028     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
2029     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
2030     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
2031     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
2032     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
2033     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
2034     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
2035     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2036     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
2037     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
2038     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
2039     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
2040     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
2041     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
2042     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
2043     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2044     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
2045     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
2046     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
2047     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2048     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },
2049 
2050     // These truncates are really widening elements.
2051     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
2052     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
2053     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
2054     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
2055     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
2056     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW
2057 
2058     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  2 }, // PAND+PACKUSWB
2059     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 }, // PAND+PACKUSWB
2060     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 }, // PAND+PACKUSWB
2061     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
2062     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i32,  3 }, // PAND+2*PACKUSWB
2063     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i32,  1 },
2064     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
2065     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
2066     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
2067     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
2068     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2069     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
2070     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i64,  4 }, // PAND+3*PACKUSWB
2071     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
2072     { ISD::TRUNCATE,    MVT::v2i32,  MVT::v2i64,  1 }, // PSHUFD
2073   };
2074 
2075   std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2076   std::pair<InstructionCost, MVT> LTDest =
2077       TLI->getTypeLegalizationCost(DL, Dst);
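       // The two pre-AVX lookups below key on the legalized types and scale
       // by the source split factor. E.g. on an SSE2-only target, sitofp of
       // <8 x i32> to <8 x float> legalizes to 2 x (v4i32 -> v4f32), so
       // LTSrc.first == 2 and the SSE2ConversionTbl entry of 5 costs
       // 2 * 5 = 10.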
2078 
2079   if (ST->hasSSE41() && !ST->hasAVX())
2080     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2081                                                    LTDest.second, LTSrc.second))
2082       return AdjustCost(LTSrc.first * Entry->Cost);
2083 
2084   if (ST->hasSSE2() && !ST->hasAVX())
2085     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2086                                                    LTDest.second, LTSrc.second))
2087       return AdjustCost(LTSrc.first * Entry->Cost);
2088 
2089   EVT SrcTy = TLI->getValueType(DL, Src);
2090   EVT DstTy = TLI->getValueType(DL, Dst);
2091 
2092   // The function getSimpleVT only handles simple value types.
2093   if (!SrcTy.isSimple() || !DstTy.isSimple())
2094     return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));
2095 
2096   MVT SimpleSrcTy = SrcTy.getSimpleVT();
2097   MVT SimpleDstTy = DstTy.getSimpleVT();
2098 
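       // From here on, lookups cascade from the most feature-specific table
       // down to the most generic one; the first table with a matching
       // (ISD, dst, src) entry determines the cost.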
2099   if (ST->useAVX512Regs()) {
2100     if (ST->hasBWI())
2101       if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
2102                                                      SimpleDstTy, SimpleSrcTy))
2103         return AdjustCost(Entry->Cost);
2104 
2105     if (ST->hasDQI())
2106       if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
2107                                                      SimpleDstTy, SimpleSrcTy))
2108         return AdjustCost(Entry->Cost);
2109 
2110     if (ST->hasAVX512())
2111       if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
2112                                                      SimpleDstTy, SimpleSrcTy))
2113         return AdjustCost(Entry->Cost);
2114   }
2115 
2116   if (ST->hasBWI())
2117     if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2118                                                    SimpleDstTy, SimpleSrcTy))
2119       return AdjustCost(Entry->Cost);
2120 
2121   if (ST->hasDQI())
2122     if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2123                                                    SimpleDstTy, SimpleSrcTy))
2124       return AdjustCost(Entry->Cost);
2125 
2126   if (ST->hasAVX512())
2127     if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2128                                                    SimpleDstTy, SimpleSrcTy))
2129       return AdjustCost(Entry->Cost);
2130 
2131   if (ST->hasAVX2()) {
2132     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2133                                                    SimpleDstTy, SimpleSrcTy))
2134       return AdjustCost(Entry->Cost);
2135   }
2136 
2137   if (ST->hasAVX()) {
2138     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2139                                                    SimpleDstTy, SimpleSrcTy))
2140       return AdjustCost(Entry->Cost);
2141   }
2142 
2143   if (ST->hasSSE41()) {
2144     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2145                                                    SimpleDstTy, SimpleSrcTy))
2146       return AdjustCost(Entry->Cost);
2147   }
2148 
2149   if (ST->hasSSE2()) {
2150     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2151                                                    SimpleDstTy, SimpleSrcTy))
2152       return AdjustCost(Entry->Cost);
2153   }
2154 
2155   return AdjustCost(
2156       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2157 }
2158 
2159 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2160                                                Type *CondTy,
2161                                                CmpInst::Predicate VecPred,
2162                                                TTI::TargetCostKind CostKind,
2163                                                const Instruction *I) {
2164   // TODO: Handle other cost kinds.
2165   if (CostKind != TTI::TCK_RecipThroughput)
2166     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2167                                      I);
2168 
2169   // Legalize the type.
2170   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2171 
2172   MVT MTy = LT.second;
2173 
2174   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2175   assert(ISD && "Invalid opcode");
2176 
2177   unsigned ExtraCost = 0;
2178   if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2179     // Some vector comparison predicates cost extra instructions.
2180     if (MTy.isVector() &&
2181         !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2182           (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2183           ST->hasBWI())) {
2184       switch (cast<CmpInst>(I)->getPredicate()) {
2185       case CmpInst::Predicate::ICMP_NE:
2186         // xor(cmpeq(x,y),-1)
2187         ExtraCost = 1;
2188         break;
2189       case CmpInst::Predicate::ICMP_SGE:
2190       case CmpInst::Predicate::ICMP_SLE:
2191         // xor(cmpgt(x,y),-1)
2192         ExtraCost = 1;
2193         break;
2194       case CmpInst::Predicate::ICMP_ULT:
2195       case CmpInst::Predicate::ICMP_UGT:
2196         // cmpgt(xor(x,signbit),xor(y,signbit))
2197         // xor(cmpeq(pmaxu(x,y),x),-1)
2198         ExtraCost = 2;
2199         break;
2200       case CmpInst::Predicate::ICMP_ULE:
2201       case CmpInst::Predicate::ICMP_UGE:
2202         if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2203             (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2204           // cmpeq(psubus(x,y),0)
2205           // cmpeq(pminu(x,y),x)
2206           ExtraCost = 1;
2207         } else {
2208           // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2209           ExtraCost = 3;
2210         }
2211         break;
2212       default:
2213         break;
2214       }
2215     }
2216   }
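       // E.g. a <16 x i8> ICMP_ULE on a plain SSE2 target takes the
       // cmpeq(pminu(x,y),x) form above, so ExtraCost == 1 is added on top
       // of the base SETCC cost from the tables below.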
2217 
2218   static const CostTblEntry SLMCostTbl[] = {
2219     // slm pcmpeq/pcmpgt throughput is 2
2220     { ISD::SETCC,   MVT::v2i64,   2 },
2221   };
2222 
2223   static const CostTblEntry AVX512BWCostTbl[] = {
2224     { ISD::SETCC,   MVT::v32i16,  1 },
2225     { ISD::SETCC,   MVT::v64i8,   1 },
2226 
2227     { ISD::SELECT,  MVT::v32i16,  1 },
2228     { ISD::SELECT,  MVT::v64i8,   1 },
2229   };
2230 
2231   static const CostTblEntry AVX512CostTbl[] = {
2232     { ISD::SETCC,   MVT::v8i64,   1 },
2233     { ISD::SETCC,   MVT::v16i32,  1 },
2234     { ISD::SETCC,   MVT::v8f64,   1 },
2235     { ISD::SETCC,   MVT::v16f32,  1 },
2236 
2237     { ISD::SELECT,  MVT::v8i64,   1 },
2238     { ISD::SELECT,  MVT::v16i32,  1 },
2239     { ISD::SELECT,  MVT::v8f64,   1 },
2240     { ISD::SELECT,  MVT::v16f32,  1 },
2241 
2242     { ISD::SETCC,   MVT::v32i16,  2 }, // FIXME: should probably be 4
2243     { ISD::SETCC,   MVT::v64i8,   2 }, // FIXME: should probably be 4
2244 
2245     { ISD::SELECT,  MVT::v32i16,  2 }, // FIXME: should be 3
2246     { ISD::SELECT,  MVT::v64i8,   2 }, // FIXME: should be 3
2247   };
2248 
2249   static const CostTblEntry AVX2CostTbl[] = {
2250     { ISD::SETCC,   MVT::v4i64,   1 },
2251     { ISD::SETCC,   MVT::v8i32,   1 },
2252     { ISD::SETCC,   MVT::v16i16,  1 },
2253     { ISD::SETCC,   MVT::v32i8,   1 },
2254 
2255     { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
2256     { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
2257     { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
2258     { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
2259   };
2260 
2261   static const CostTblEntry AVX1CostTbl[] = {
2262     { ISD::SETCC,   MVT::v4f64,   1 },
2263     { ISD::SETCC,   MVT::v8f32,   1 },
2264     // AVX1 does not support 8-wide integer compare.
2265     { ISD::SETCC,   MVT::v4i64,   4 },
2266     { ISD::SETCC,   MVT::v8i32,   4 },
2267     { ISD::SETCC,   MVT::v16i16,  4 },
2268     { ISD::SETCC,   MVT::v32i8,   4 },
2269 
2270     { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
2271     { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
2272     { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
2273     { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
2274     { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
2275     { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
2276   };
2277 
2278   static const CostTblEntry SSE42CostTbl[] = {
2279     { ISD::SETCC,   MVT::v2f64,   1 },
2280     { ISD::SETCC,   MVT::v4f32,   1 },
2281     { ISD::SETCC,   MVT::v2i64,   1 },
2282   };
2283 
2284   static const CostTblEntry SSE41CostTbl[] = {
2285     { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
2286     { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
2287     { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
2288     { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
2289     { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
2290     { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
2291   };
2292 
2293   static const CostTblEntry SSE2CostTbl[] = {
2294     { ISD::SETCC,   MVT::v2f64,   2 },
2295     { ISD::SETCC,   MVT::f64,     1 },
2296     { ISD::SETCC,   MVT::v2i64,   8 },
2297     { ISD::SETCC,   MVT::v4i32,   1 },
2298     { ISD::SETCC,   MVT::v8i16,   1 },
2299     { ISD::SETCC,   MVT::v16i8,   1 },
2300 
2301     { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
2302     { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
2303     { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
2304     { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
2305     { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
2306   };
2307 
2308   static const CostTblEntry SSE1CostTbl[] = {
2309     { ISD::SETCC,   MVT::v4f32,   2 },
2310     { ISD::SETCC,   MVT::f32,     1 },
2311 
2312     { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
2313   };
2314 
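  // The lookups below walk from the most specific feature level down to the
  // most generic one. Illustrative example: a v8i32 SETCC on an AVX1-only
  // target misses the AVX2 table and hits the AVX1 entry, costing
  // LT.first * (ExtraCost + 4) because the 8-wide integer compare must be
  // split into two 128-bit halves.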
2315   if (ST->isSLM())
2316     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2317       return LT.first * (ExtraCost + Entry->Cost);
2318 
2319   if (ST->hasBWI())
2320     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2321       return LT.first * (ExtraCost + Entry->Cost);
2322 
2323   if (ST->hasAVX512())
2324     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2325       return LT.first * (ExtraCost + Entry->Cost);
2326 
2327   if (ST->hasAVX2())
2328     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2329       return LT.first * (ExtraCost + Entry->Cost);
2330 
2331   if (ST->hasAVX())
2332     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2333       return LT.first * (ExtraCost + Entry->Cost);
2334 
2335   if (ST->hasSSE42())
2336     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2337       return LT.first * (ExtraCost + Entry->Cost);
2338 
2339   if (ST->hasSSE41())
2340     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2341       return LT.first * (ExtraCost + Entry->Cost);
2342 
2343   if (ST->hasSSE2())
2344     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2345       return LT.first * (ExtraCost + Entry->Cost);
2346 
2347   if (ST->hasSSE1())
2348     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2349       return LT.first * (ExtraCost + Entry->Cost);
2350 
2351   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2352 }
2353 
2354 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2355 
2356 InstructionCost
2357 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2358                                            TTI::TargetCostKind CostKind) {
2359 
2360   // Costs should match the codegen from:
2361   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2362   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2363   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2364   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2365   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2366 
2367   // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2368   //       specialized in these tables yet.
2369   static const CostTblEntry AVX512CDCostTbl[] = {
2370     { ISD::CTLZ,       MVT::v8i64,   1 },
2371     { ISD::CTLZ,       MVT::v16i32,  1 },
2372     { ISD::CTLZ,       MVT::v32i16,  8 },
2373     { ISD::CTLZ,       MVT::v64i8,  20 },
2374     { ISD::CTLZ,       MVT::v4i64,   1 },
2375     { ISD::CTLZ,       MVT::v8i32,   1 },
2376     { ISD::CTLZ,       MVT::v16i16,  4 },
2377     { ISD::CTLZ,       MVT::v32i8,  10 },
2378     { ISD::CTLZ,       MVT::v2i64,   1 },
2379     { ISD::CTLZ,       MVT::v4i32,   1 },
2380     { ISD::CTLZ,       MVT::v8i16,   4 },
2381     { ISD::CTLZ,       MVT::v16i8,   4 },
2382   };
2383   static const CostTblEntry AVX512BWCostTbl[] = {
2384     { ISD::ABS,        MVT::v32i16,  1 },
2385     { ISD::ABS,        MVT::v64i8,   1 },
2386     { ISD::BITREVERSE, MVT::v8i64,   5 },
2387     { ISD::BITREVERSE, MVT::v16i32,  5 },
2388     { ISD::BITREVERSE, MVT::v32i16,  5 },
2389     { ISD::BITREVERSE, MVT::v64i8,   5 },
2390     { ISD::CTLZ,       MVT::v8i64,  23 },
2391     { ISD::CTLZ,       MVT::v16i32, 22 },
2392     { ISD::CTLZ,       MVT::v32i16, 18 },
2393     { ISD::CTLZ,       MVT::v64i8,  17 },
2394     { ISD::CTPOP,      MVT::v8i64,   7 },
2395     { ISD::CTPOP,      MVT::v16i32, 11 },
2396     { ISD::CTPOP,      MVT::v32i16,  9 },
2397     { ISD::CTPOP,      MVT::v64i8,   6 },
2398     { ISD::CTTZ,       MVT::v8i64,  10 },
2399     { ISD::CTTZ,       MVT::v16i32, 14 },
2400     { ISD::CTTZ,       MVT::v32i16, 12 },
2401     { ISD::CTTZ,       MVT::v64i8,   9 },
2402     { ISD::SADDSAT,    MVT::v32i16,  1 },
2403     { ISD::SADDSAT,    MVT::v64i8,   1 },
2404     { ISD::SMAX,       MVT::v32i16,  1 },
2405     { ISD::SMAX,       MVT::v64i8,   1 },
2406     { ISD::SMIN,       MVT::v32i16,  1 },
2407     { ISD::SMIN,       MVT::v64i8,   1 },
2408     { ISD::SSUBSAT,    MVT::v32i16,  1 },
2409     { ISD::SSUBSAT,    MVT::v64i8,   1 },
2410     { ISD::UADDSAT,    MVT::v32i16,  1 },
2411     { ISD::UADDSAT,    MVT::v64i8,   1 },
2412     { ISD::UMAX,       MVT::v32i16,  1 },
2413     { ISD::UMAX,       MVT::v64i8,   1 },
2414     { ISD::UMIN,       MVT::v32i16,  1 },
2415     { ISD::UMIN,       MVT::v64i8,   1 },
2416     { ISD::USUBSAT,    MVT::v32i16,  1 },
2417     { ISD::USUBSAT,    MVT::v64i8,   1 },
2418   };
2419   static const CostTblEntry AVX512CostTbl[] = {
2420     { ISD::ABS,        MVT::v8i64,   1 },
2421     { ISD::ABS,        MVT::v16i32,  1 },
2422     { ISD::ABS,        MVT::v32i16,  2 }, // FIXME: include split
2423     { ISD::ABS,        MVT::v64i8,   2 }, // FIXME: include split
2424     { ISD::ABS,        MVT::v4i64,   1 },
2425     { ISD::ABS,        MVT::v2i64,   1 },
2426     { ISD::BITREVERSE, MVT::v8i64,  36 },
2427     { ISD::BITREVERSE, MVT::v16i32, 24 },
2428     { ISD::BITREVERSE, MVT::v32i16, 10 },
2429     { ISD::BITREVERSE, MVT::v64i8,  10 },
2430     { ISD::CTLZ,       MVT::v8i64,  29 },
2431     { ISD::CTLZ,       MVT::v16i32, 35 },
2432     { ISD::CTLZ,       MVT::v32i16, 28 },
2433     { ISD::CTLZ,       MVT::v64i8,  18 },
2434     { ISD::CTPOP,      MVT::v8i64,  16 },
2435     { ISD::CTPOP,      MVT::v16i32, 24 },
2436     { ISD::CTPOP,      MVT::v32i16, 18 },
2437     { ISD::CTPOP,      MVT::v64i8,  12 },
2438     { ISD::CTTZ,       MVT::v8i64,  20 },
2439     { ISD::CTTZ,       MVT::v16i32, 28 },
2440     { ISD::CTTZ,       MVT::v32i16, 24 },
2441     { ISD::CTTZ,       MVT::v64i8,  18 },
2442     { ISD::SMAX,       MVT::v8i64,   1 },
2443     { ISD::SMAX,       MVT::v16i32,  1 },
2444     { ISD::SMAX,       MVT::v32i16,  2 }, // FIXME: include split
2445     { ISD::SMAX,       MVT::v64i8,   2 }, // FIXME: include split
2446     { ISD::SMAX,       MVT::v4i64,   1 },
2447     { ISD::SMAX,       MVT::v2i64,   1 },
2448     { ISD::SMIN,       MVT::v8i64,   1 },
2449     { ISD::SMIN,       MVT::v16i32,  1 },
2450     { ISD::SMIN,       MVT::v32i16,  2 }, // FIXME: include split
2451     { ISD::SMIN,       MVT::v64i8,   2 }, // FIXME: include split
2452     { ISD::SMIN,       MVT::v4i64,   1 },
2453     { ISD::SMIN,       MVT::v2i64,   1 },
2454     { ISD::UMAX,       MVT::v8i64,   1 },
2455     { ISD::UMAX,       MVT::v16i32,  1 },
2456     { ISD::UMAX,       MVT::v32i16,  2 }, // FIXME: include split
2457     { ISD::UMAX,       MVT::v64i8,   2 }, // FIXME: include split
2458     { ISD::UMAX,       MVT::v4i64,   1 },
2459     { ISD::UMAX,       MVT::v2i64,   1 },
2460     { ISD::UMIN,       MVT::v8i64,   1 },
2461     { ISD::UMIN,       MVT::v16i32,  1 },
2462     { ISD::UMIN,       MVT::v32i16,  2 }, // FIXME: include split
2463     { ISD::UMIN,       MVT::v64i8,   2 }, // FIXME: include split
2464     { ISD::UMIN,       MVT::v4i64,   1 },
2465     { ISD::UMIN,       MVT::v2i64,   1 },
2466     { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
2467     { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
2468     { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
2469     { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
2470     { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
2471     { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
2472     { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
2473     { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
2474     { ISD::SADDSAT,    MVT::v32i16,  2 }, // FIXME: include split
2475     { ISD::SADDSAT,    MVT::v64i8,   2 }, // FIXME: include split
2476     { ISD::SSUBSAT,    MVT::v32i16,  2 }, // FIXME: include split
2477     { ISD::SSUBSAT,    MVT::v64i8,   2 }, // FIXME: include split
2478     { ISD::UADDSAT,    MVT::v32i16,  2 }, // FIXME: include split
2479     { ISD::UADDSAT,    MVT::v64i8,   2 }, // FIXME: include split
2480     { ISD::USUBSAT,    MVT::v32i16,  2 }, // FIXME: include split
2481     { ISD::USUBSAT,    MVT::v64i8,   2 }, // FIXME: include split
2482     { ISD::FMAXNUM,    MVT::f32,     2 },
2483     { ISD::FMAXNUM,    MVT::v4f32,   2 },
2484     { ISD::FMAXNUM,    MVT::v8f32,   2 },
2485     { ISD::FMAXNUM,    MVT::v16f32,  2 },
2486     { ISD::FMAXNUM,    MVT::f64,     2 },
2487     { ISD::FMAXNUM,    MVT::v2f64,   2 },
2488     { ISD::FMAXNUM,    MVT::v4f64,   2 },
2489     { ISD::FMAXNUM,    MVT::v8f64,   2 },
2490   };
2491   static const CostTblEntry XOPCostTbl[] = {
2492     { ISD::BITREVERSE, MVT::v4i64,   4 },
2493     { ISD::BITREVERSE, MVT::v8i32,   4 },
2494     { ISD::BITREVERSE, MVT::v16i16,  4 },
2495     { ISD::BITREVERSE, MVT::v32i8,   4 },
2496     { ISD::BITREVERSE, MVT::v2i64,   1 },
2497     { ISD::BITREVERSE, MVT::v4i32,   1 },
2498     { ISD::BITREVERSE, MVT::v8i16,   1 },
2499     { ISD::BITREVERSE, MVT::v16i8,   1 },
2500     { ISD::BITREVERSE, MVT::i64,     3 },
2501     { ISD::BITREVERSE, MVT::i32,     3 },
2502     { ISD::BITREVERSE, MVT::i16,     3 },
2503     { ISD::BITREVERSE, MVT::i8,      3 }
2504   };
2505   static const CostTblEntry AVX2CostTbl[] = {
2506     { ISD::ABS,        MVT::v4i64,   2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2507     { ISD::ABS,        MVT::v8i32,   1 },
2508     { ISD::ABS,        MVT::v16i16,  1 },
2509     { ISD::ABS,        MVT::v32i8,   1 },
2510     { ISD::BITREVERSE, MVT::v4i64,   5 },
2511     { ISD::BITREVERSE, MVT::v8i32,   5 },
2512     { ISD::BITREVERSE, MVT::v16i16,  5 },
2513     { ISD::BITREVERSE, MVT::v32i8,   5 },
2514     { ISD::BSWAP,      MVT::v4i64,   1 },
2515     { ISD::BSWAP,      MVT::v8i32,   1 },
2516     { ISD::BSWAP,      MVT::v16i16,  1 },
2517     { ISD::CTLZ,       MVT::v4i64,  23 },
2518     { ISD::CTLZ,       MVT::v8i32,  18 },
2519     { ISD::CTLZ,       MVT::v16i16, 14 },
2520     { ISD::CTLZ,       MVT::v32i8,   9 },
2521     { ISD::CTPOP,      MVT::v4i64,   7 },
2522     { ISD::CTPOP,      MVT::v8i32,  11 },
2523     { ISD::CTPOP,      MVT::v16i16,  9 },
2524     { ISD::CTPOP,      MVT::v32i8,   6 },
2525     { ISD::CTTZ,       MVT::v4i64,  10 },
2526     { ISD::CTTZ,       MVT::v8i32,  14 },
2527     { ISD::CTTZ,       MVT::v16i16, 12 },
2528     { ISD::CTTZ,       MVT::v32i8,   9 },
2529     { ISD::SADDSAT,    MVT::v16i16,  1 },
2530     { ISD::SADDSAT,    MVT::v32i8,   1 },
2531     { ISD::SMAX,       MVT::v8i32,   1 },
2532     { ISD::SMAX,       MVT::v16i16,  1 },
2533     { ISD::SMAX,       MVT::v32i8,   1 },
2534     { ISD::SMIN,       MVT::v8i32,   1 },
2535     { ISD::SMIN,       MVT::v16i16,  1 },
2536     { ISD::SMIN,       MVT::v32i8,   1 },
2537     { ISD::SSUBSAT,    MVT::v16i16,  1 },
2538     { ISD::SSUBSAT,    MVT::v32i8,   1 },
2539     { ISD::UADDSAT,    MVT::v16i16,  1 },
2540     { ISD::UADDSAT,    MVT::v32i8,   1 },
2541     { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
2542     { ISD::UMAX,       MVT::v8i32,   1 },
2543     { ISD::UMAX,       MVT::v16i16,  1 },
2544     { ISD::UMAX,       MVT::v32i8,   1 },
2545     { ISD::UMIN,       MVT::v8i32,   1 },
2546     { ISD::UMIN,       MVT::v16i16,  1 },
2547     { ISD::UMIN,       MVT::v32i8,   1 },
2548     { ISD::USUBSAT,    MVT::v16i16,  1 },
2549     { ISD::USUBSAT,    MVT::v32i8,   1 },
2550     { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
2551     { ISD::FMAXNUM,    MVT::v8f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2552     { ISD::FMAXNUM,    MVT::v4f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2553     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
2554     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
2555     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
2556     { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
2557     { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
2558     { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
2559   };
2560   static const CostTblEntry AVX1CostTbl[] = {
2561     { ISD::ABS,        MVT::v4i64,   5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2562     { ISD::ABS,        MVT::v8i32,   3 },
2563     { ISD::ABS,        MVT::v16i16,  3 },
2564     { ISD::ABS,        MVT::v32i8,   3 },
2565     { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
2566     { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
2567     { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2568     { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
2569     { ISD::BSWAP,      MVT::v4i64,   4 },
2570     { ISD::BSWAP,      MVT::v8i32,   4 },
2571     { ISD::BSWAP,      MVT::v16i16,  4 },
2572     { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
2573     { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
2574     { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2575     { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2576     { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
2577     { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
2578     { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2579     { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
2580     { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
2581     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
2582     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2583     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2584     { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2585     { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2586     { ISD::SMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2587     { ISD::SMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2588     { ISD::SMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2589     { ISD::SMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2590     { ISD::SMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2591     { ISD::SMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2592     { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2593     { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2594     { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2595     { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2596     { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
2597     { ISD::UMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2598     { ISD::UMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2599     { ISD::UMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2600     { ISD::UMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2601     { ISD::UMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2602     { ISD::UMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2603     { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2604     { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2605     { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
2606     { ISD::FMAXNUM,    MVT::f32,     3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2607     { ISD::FMAXNUM,    MVT::v4f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2608     { ISD::FMAXNUM,    MVT::v8f32,   5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2609     { ISD::FMAXNUM,    MVT::f64,     3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2610     { ISD::FMAXNUM,    MVT::v2f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2611     { ISD::FMAXNUM,    MVT::v4f64,   5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2612     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
2613     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
2614     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
2615     { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
2616     { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
2617     { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
2618   };
2619   static const CostTblEntry GLMCostTbl[] = {
2620     { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
2621     { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2622     { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
2623     { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2624   };
2625   static const CostTblEntry SLMCostTbl[] = {
2626     { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
2627     { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2628     { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
2629     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2630   };
2631   static const CostTblEntry SSE42CostTbl[] = {
2632     { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
2633     { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
2634     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
2635     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
2636   };
2637   static const CostTblEntry SSE41CostTbl[] = {
2638     { ISD::ABS,        MVT::v2i64,   2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2639     { ISD::SMAX,       MVT::v4i32,   1 },
2640     { ISD::SMAX,       MVT::v16i8,   1 },
2641     { ISD::SMIN,       MVT::v4i32,   1 },
2642     { ISD::SMIN,       MVT::v16i8,   1 },
2643     { ISD::UMAX,       MVT::v4i32,   1 },
2644     { ISD::UMAX,       MVT::v8i16,   1 },
2645     { ISD::UMIN,       MVT::v4i32,   1 },
2646     { ISD::UMIN,       MVT::v8i16,   1 },
2647   };
2648   static const CostTblEntry SSSE3CostTbl[] = {
2649     { ISD::ABS,        MVT::v4i32,   1 },
2650     { ISD::ABS,        MVT::v8i16,   1 },
2651     { ISD::ABS,        MVT::v16i8,   1 },
2652     { ISD::BITREVERSE, MVT::v2i64,   5 },
2653     { ISD::BITREVERSE, MVT::v4i32,   5 },
2654     { ISD::BITREVERSE, MVT::v8i16,   5 },
2655     { ISD::BITREVERSE, MVT::v16i8,   5 },
2656     { ISD::BSWAP,      MVT::v2i64,   1 },
2657     { ISD::BSWAP,      MVT::v4i32,   1 },
2658     { ISD::BSWAP,      MVT::v8i16,   1 },
2659     { ISD::CTLZ,       MVT::v2i64,  23 },
2660     { ISD::CTLZ,       MVT::v4i32,  18 },
2661     { ISD::CTLZ,       MVT::v8i16,  14 },
2662     { ISD::CTLZ,       MVT::v16i8,   9 },
2663     { ISD::CTPOP,      MVT::v2i64,   7 },
2664     { ISD::CTPOP,      MVT::v4i32,  11 },
2665     { ISD::CTPOP,      MVT::v8i16,   9 },
2666     { ISD::CTPOP,      MVT::v16i8,   6 },
2667     { ISD::CTTZ,       MVT::v2i64,  10 },
2668     { ISD::CTTZ,       MVT::v4i32,  14 },
2669     { ISD::CTTZ,       MVT::v8i16,  12 },
2670     { ISD::CTTZ,       MVT::v16i8,   9 }
2671   };
2672   static const CostTblEntry SSE2CostTbl[] = {
2673     { ISD::ABS,        MVT::v2i64,   4 },
2674     { ISD::ABS,        MVT::v4i32,   3 },
2675     { ISD::ABS,        MVT::v8i16,   2 },
2676     { ISD::ABS,        MVT::v16i8,   2 },
2677     { ISD::BITREVERSE, MVT::v2i64,  29 },
2678     { ISD::BITREVERSE, MVT::v4i32,  27 },
2679     { ISD::BITREVERSE, MVT::v8i16,  27 },
2680     { ISD::BITREVERSE, MVT::v16i8,  20 },
2681     { ISD::BSWAP,      MVT::v2i64,   7 },
2682     { ISD::BSWAP,      MVT::v4i32,   7 },
2683     { ISD::BSWAP,      MVT::v8i16,   7 },
2684     { ISD::CTLZ,       MVT::v2i64,  25 },
2685     { ISD::CTLZ,       MVT::v4i32,  26 },
2686     { ISD::CTLZ,       MVT::v8i16,  20 },
2687     { ISD::CTLZ,       MVT::v16i8,  17 },
2688     { ISD::CTPOP,      MVT::v2i64,  12 },
2689     { ISD::CTPOP,      MVT::v4i32,  15 },
2690     { ISD::CTPOP,      MVT::v8i16,  13 },
2691     { ISD::CTPOP,      MVT::v16i8,  10 },
2692     { ISD::CTTZ,       MVT::v2i64,  14 },
2693     { ISD::CTTZ,       MVT::v4i32,  18 },
2694     { ISD::CTTZ,       MVT::v8i16,  16 },
2695     { ISD::CTTZ,       MVT::v16i8,  13 },
2696     { ISD::SADDSAT,    MVT::v8i16,   1 },
2697     { ISD::SADDSAT,    MVT::v16i8,   1 },
2698     { ISD::SMAX,       MVT::v8i16,   1 },
2699     { ISD::SMIN,       MVT::v8i16,   1 },
2700     { ISD::SSUBSAT,    MVT::v8i16,   1 },
2701     { ISD::SSUBSAT,    MVT::v16i8,   1 },
2702     { ISD::UADDSAT,    MVT::v8i16,   1 },
2703     { ISD::UADDSAT,    MVT::v16i8,   1 },
2704     { ISD::UMAX,       MVT::v8i16,   2 },
2705     { ISD::UMAX,       MVT::v16i8,   1 },
2706     { ISD::UMIN,       MVT::v8i16,   2 },
2707     { ISD::UMIN,       MVT::v16i8,   1 },
2708     { ISD::USUBSAT,    MVT::v8i16,   1 },
2709     { ISD::USUBSAT,    MVT::v16i8,   1 },
2710     { ISD::FMAXNUM,    MVT::f64,     4 },
2711     { ISD::FMAXNUM,    MVT::v2f64,   4 },
2712     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
2713     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
2714   };
2715   static const CostTblEntry SSE1CostTbl[] = {
2716     { ISD::FMAXNUM,    MVT::f32,     4 },
2717     { ISD::FMAXNUM,    MVT::v4f32,   4 },
2718     { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
2719     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
2720   };
2721   static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2722     { ISD::CTTZ,       MVT::i64,     1 },
2723   };
2724   static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2725     { ISD::CTTZ,       MVT::i32,     1 },
2726     { ISD::CTTZ,       MVT::i16,     1 },
2727     { ISD::CTTZ,       MVT::i8,      1 },
2728   };
2729   static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2730     { ISD::CTLZ,       MVT::i64,     1 },
2731   };
2732   static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2733     { ISD::CTLZ,       MVT::i32,     1 },
2734     { ISD::CTLZ,       MVT::i16,     1 },
2735     { ISD::CTLZ,       MVT::i8,      1 },
2736   };
2737   static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2738     { ISD::CTPOP,      MVT::i64,     1 },
2739   };
2740   static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2741     { ISD::CTPOP,      MVT::i32,     1 },
2742     { ISD::CTPOP,      MVT::i16,     1 },
2743     { ISD::CTPOP,      MVT::i8,      1 },
2744   };
2745   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2746     { ISD::ABS,        MVT::i64,     2 }, // SUB+CMOV
2747     { ISD::BITREVERSE, MVT::i64,    14 },
2748     { ISD::BSWAP,      MVT::i64,     1 },
2749     { ISD::CTLZ,       MVT::i64,     4 }, // BSR+XOR or BSR+XOR+CMOV
2750     { ISD::CTTZ,       MVT::i64,     3 }, // TEST+BSF+CMOV/BRANCH
2751     { ISD::CTPOP,      MVT::i64,    10 },
2752     { ISD::SADDO,      MVT::i64,     1 },
2753     { ISD::UADDO,      MVT::i64,     1 },
2754     { ISD::UMULO,      MVT::i64,     2 }, // mulq + seto
2755   };
2756   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2757     { ISD::ABS,        MVT::i32,     2 }, // SUB+CMOV
2758     { ISD::ABS,        MVT::i16,     2 }, // SUB+CMOV
2759     { ISD::BITREVERSE, MVT::i32,    14 },
2760     { ISD::BITREVERSE, MVT::i16,    14 },
2761     { ISD::BITREVERSE, MVT::i8,     11 },
2762     { ISD::BSWAP,      MVT::i32,     1 },
2763     { ISD::BSWAP,      MVT::i16,     1 }, // ROL
2764     { ISD::CTLZ,       MVT::i32,     4 }, // BSR+XOR or BSR+XOR+CMOV
2765     { ISD::CTLZ,       MVT::i16,     4 }, // BSR+XOR or BSR+XOR+CMOV
2766     { ISD::CTLZ,       MVT::i8,      4 }, // BSR+XOR or BSR+XOR+CMOV
2767     { ISD::CTTZ,       MVT::i32,     3 }, // TEST+BSF+CMOV/BRANCH
2768     { ISD::CTTZ,       MVT::i16,     3 }, // TEST+BSF+CMOV/BRANCH
2769     { ISD::CTTZ,       MVT::i8,      3 }, // TEST+BSF+CMOV/BRANCH
2770     { ISD::CTPOP,      MVT::i32,     8 },
2771     { ISD::CTPOP,      MVT::i16,     9 },
2772     { ISD::CTPOP,      MVT::i8,      7 },
2773     { ISD::SADDO,      MVT::i32,     1 },
2774     { ISD::SADDO,      MVT::i16,     1 },
2775     { ISD::SADDO,      MVT::i8,      1 },
2776     { ISD::UADDO,      MVT::i32,     1 },
2777     { ISD::UADDO,      MVT::i16,     1 },
2778     { ISD::UADDO,      MVT::i8,      1 },
2779     { ISD::UMULO,      MVT::i32,     2 }, // mul + seto
2780     { ISD::UMULO,      MVT::i16,     2 },
2781     { ISD::UMULO,      MVT::i8,      2 },
2782   };
2783 
2784   Type *RetTy = ICA.getReturnType();
2785   Type *OpTy = RetTy;
2786   Intrinsic::ID IID = ICA.getID();
2787   unsigned ISD = ISD::DELETED_NODE;
2788   switch (IID) {
2789   default:
2790     break;
2791   case Intrinsic::abs:
2792     ISD = ISD::ABS;
2793     break;
2794   case Intrinsic::bitreverse:
2795     ISD = ISD::BITREVERSE;
2796     break;
2797   case Intrinsic::bswap:
2798     ISD = ISD::BSWAP;
2799     break;
2800   case Intrinsic::ctlz:
2801     ISD = ISD::CTLZ;
2802     break;
2803   case Intrinsic::ctpop:
2804     ISD = ISD::CTPOP;
2805     break;
2806   case Intrinsic::cttz:
2807     ISD = ISD::CTTZ;
2808     break;
2809   case Intrinsic::maxnum:
2810   case Intrinsic::minnum:
2811     // FMINNUM has same costs so don't duplicate.
2812     ISD = ISD::FMAXNUM;
2813     break;
2814   case Intrinsic::sadd_sat:
2815     ISD = ISD::SADDSAT;
2816     break;
2817   case Intrinsic::smax:
2818     ISD = ISD::SMAX;
2819     break;
2820   case Intrinsic::smin:
2821     ISD = ISD::SMIN;
2822     break;
2823   case Intrinsic::ssub_sat:
2824     ISD = ISD::SSUBSAT;
2825     break;
2826   case Intrinsic::uadd_sat:
2827     ISD = ISD::UADDSAT;
2828     break;
2829   case Intrinsic::umax:
2830     ISD = ISD::UMAX;
2831     break;
2832   case Intrinsic::umin:
2833     ISD = ISD::UMIN;
2834     break;
2835   case Intrinsic::usub_sat:
2836     ISD = ISD::USUBSAT;
2837     break;
2838   case Intrinsic::sqrt:
2839     ISD = ISD::FSQRT;
2840     break;
2841   case Intrinsic::sadd_with_overflow:
2842   case Intrinsic::ssub_with_overflow:
2843     // SSUBO has same costs so don't duplicate.
2844     ISD = ISD::SADDO;
2845     OpTy = RetTy->getContainedType(0);
2846     break;
2847   case Intrinsic::uadd_with_overflow:
2848   case Intrinsic::usub_with_overflow:
2849     // USUBO has same costs so don't duplicate.
2850     ISD = ISD::UADDO;
2851     OpTy = RetTy->getContainedType(0);
2852     break;
2853   case Intrinsic::umul_with_overflow:
2854   case Intrinsic::smul_with_overflow:
2855     // SMULO has same costs so don't duplicate.
2856     ISD = ISD::UMULO;
2857     OpTy = RetTy->getContainedType(0);
2858     break;
2859   }
2860 
2861   if (ISD != ISD::DELETED_NODE) {
2862     // Legalize the type.
2863     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2864     MVT MTy = LT.second;
2865 
2866     // Attempt to look up the cost.
2867     if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
2868         MTy.isVector()) {
2869       // With PSHUFB the code is very similar for all types. If we have integer
2870       // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
2871       // we also need a PSHUFB.
2872       unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
2873 
2874       // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
2875       // instructions. We also need an extract and an insert.
2876       if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
2877             (ST->hasBWI() && MTy.is512BitVector())))
2878         Cost = Cost * 2 + 2;
2879 
2880       return LT.first * Cost;
2881     }
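    // Illustrative example: with GFNI, bitreverse of v16i8 is a single
    // GF2P8AFFINEQB (Cost = 1), while v8i16 also needs a PSHUFB to reverse
    // the byte order within each element (Cost = 2).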
2882 
2883     auto adjustTableCost = [](const CostTblEntry &Entry,
2884                               InstructionCost LegalizationCost,
2885                               FastMathFlags FMF) {
2886       // If there are no NaNs to deal with, then these are reduced to a
2887       // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
2888       // assume is used in the non-fast case.
2889       if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
2890         if (FMF.noNaNs())
2891           return LegalizationCost * 1;
2892       }
2893       return LegalizationCost * (int)Entry.Cost;
2894     };
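    // Illustrative example: a minnum/maxnum of v4f32 carrying the no-NaNs
    // fast-math flag collapses to a single MINPS/MAXPS, so the SSE1 table
    // cost of 4 (the assumed MIN/CMP/SELECT sequence) becomes LT.first * 1.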
2895 
2896     if (ST->useGLMDivSqrtCosts())
2897       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2898         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2899 
2900     if (ST->isSLM())
2901       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2902         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2903 
2904     if (ST->hasCDI())
2905       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2906         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2907 
2908     if (ST->hasBWI())
2909       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2910         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2911 
2912     if (ST->hasAVX512())
2913       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2914         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2915 
2916     if (ST->hasXOP())
2917       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2918         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2919 
2920     if (ST->hasAVX2())
2921       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2922         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2923 
2924     if (ST->hasAVX())
2925       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2926         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2927 
2928     if (ST->hasSSE42())
2929       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2930         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2931 
2932     if (ST->hasSSE41())
2933       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2934         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2935 
2936     if (ST->hasSSSE3())
2937       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2938         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2939 
2940     if (ST->hasSSE2())
2941       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2942         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2943 
2944     if (ST->hasSSE1())
2945       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2946         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2947 
2948     if (ST->hasBMI()) {
2949       if (ST->is64Bit())
2950         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2951           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2952 
2953       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
2954         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2955     }
2956 
2957     if (ST->hasLZCNT()) {
2958       if (ST->is64Bit())
2959         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
2960           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2961 
2962       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
2963         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2964     }
2965 
2966     if (ST->hasPOPCNT()) {
2967       if (ST->is64Bit())
2968         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
2969           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2970 
2971       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
2972         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2973     }
2974 
2975     if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
2976       if (const Instruction *II = ICA.getInst()) {
2977         if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
2978           return TTI::TCC_Free;
2979         if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
2980           if (LI->hasOneUse())
2981             return TTI::TCC_Free;
2982         }
2983       }
2984     }
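    // Illustrative IR patterns for the TCC_Free cases above:
    //   %b = call i32 @llvm.bswap.i32(i32 %x)
    //   store i32 %b, i32* %p                  ; folds into a MOVBE store
    //   %l = load i32, i32* %q                 ; single-use load...
    //   %r = call i32 @llvm.bswap.i32(i32 %l)  ; ...folds into a MOVBE load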
2985 
2986     // TODO - add BMI (TZCNT) scalar handling
2987 
2988     if (ST->is64Bit())
2989       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2990         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2991 
2992     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2993       return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2994   }
2995 
2996   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
2997 }
2998 
2999 InstructionCost
3000 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3001                                   TTI::TargetCostKind CostKind) {
3002   if (ICA.isTypeBasedOnly())
3003     return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3004 
3005   static const CostTblEntry AVX512CostTbl[] = {
3006     { ISD::ROTL,       MVT::v8i64,   1 },
3007     { ISD::ROTL,       MVT::v4i64,   1 },
3008     { ISD::ROTL,       MVT::v2i64,   1 },
3009     { ISD::ROTL,       MVT::v16i32,  1 },
3010     { ISD::ROTL,       MVT::v8i32,   1 },
3011     { ISD::ROTL,       MVT::v4i32,   1 },
3012     { ISD::ROTR,       MVT::v8i64,   1 },
3013     { ISD::ROTR,       MVT::v4i64,   1 },
3014     { ISD::ROTR,       MVT::v2i64,   1 },
3015     { ISD::ROTR,       MVT::v16i32,  1 },
3016     { ISD::ROTR,       MVT::v8i32,   1 },
3017     { ISD::ROTR,       MVT::v4i32,   1 }
3018   };
3019   // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3020   static const CostTblEntry XOPCostTbl[] = {
3021     { ISD::ROTL,       MVT::v4i64,   4 },
3022     { ISD::ROTL,       MVT::v8i32,   4 },
3023     { ISD::ROTL,       MVT::v16i16,  4 },
3024     { ISD::ROTL,       MVT::v32i8,   4 },
3025     { ISD::ROTL,       MVT::v2i64,   1 },
3026     { ISD::ROTL,       MVT::v4i32,   1 },
3027     { ISD::ROTL,       MVT::v8i16,   1 },
3028     { ISD::ROTL,       MVT::v16i8,   1 },
3029     { ISD::ROTR,       MVT::v4i64,   6 },
3030     { ISD::ROTR,       MVT::v8i32,   6 },
3031     { ISD::ROTR,       MVT::v16i16,  6 },
3032     { ISD::ROTR,       MVT::v32i8,   6 },
3033     { ISD::ROTR,       MVT::v2i64,   2 },
3034     { ISD::ROTR,       MVT::v4i32,   2 },
3035     { ISD::ROTR,       MVT::v8i16,   2 },
3036     { ISD::ROTR,       MVT::v16i8,   2 }
3037   };
3038   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3039     { ISD::ROTL,       MVT::i64,     1 },
3040     { ISD::ROTR,       MVT::i64,     1 },
3041     { ISD::FSHL,       MVT::i64,     4 }
3042   };
3043   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3044     { ISD::ROTL,       MVT::i32,     1 },
3045     { ISD::ROTL,       MVT::i16,     1 },
3046     { ISD::ROTL,       MVT::i8,      1 },
3047     { ISD::ROTR,       MVT::i32,     1 },
3048     { ISD::ROTR,       MVT::i16,     1 },
3049     { ISD::ROTR,       MVT::i8,      1 },
3050     { ISD::FSHL,       MVT::i32,     4 },
3051     { ISD::FSHL,       MVT::i16,     4 },
3052     { ISD::FSHL,       MVT::i8,      4 }
3053   };
3054 
3055   Intrinsic::ID IID = ICA.getID();
3056   Type *RetTy = ICA.getReturnType();
3057   const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3058   unsigned ISD = ISD::DELETED_NODE;
3059   switch (IID) {
3060   default:
3061     break;
3062   case Intrinsic::fshl:
3063     ISD = ISD::FSHL;
3064     if (Args[0] == Args[1])
3065       ISD = ISD::ROTL;
3066     break;
3067   case Intrinsic::fshr:
3068     // FSHR has same costs so don't duplicate.
3069     ISD = ISD::FSHL;
3070     if (Args[0] == Args[1])
3071       ISD = ISD::ROTR;
3072     break;
3073   }
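  // Illustrative example: "fshl(x, x, c)" is a left rotate, so on any x86
  // target an i32 rotate costs 1 (rol), while a funnel shift with distinct
  // operands costs 4 per the tables above (typically a shld/shrd sequence).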
3074 
3075   if (ISD != ISD::DELETED_NODE) {
3076     // Legalize the type.
3077     std::pair<InstructionCost, MVT> LT =
3078         TLI->getTypeLegalizationCost(DL, RetTy);
3079     MVT MTy = LT.second;
3080 
3081     // Attempt to look up the cost.
3082     if (ST->hasAVX512())
3083       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3084         return LT.first * Entry->Cost;
3085 
3086     if (ST->hasXOP())
3087       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3088         return LT.first * Entry->Cost;
3089 
3090     if (ST->is64Bit())
3091       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3092         return LT.first * Entry->Cost;
3093 
3094     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3095       return LT.first * Entry->Cost;
3096   }
3097 
3098   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3099 }
3100 
3101 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3102                                                unsigned Index) {
3103   static const CostTblEntry SLMCostTbl[] = {
3104      { ISD::EXTRACT_VECTOR_ELT,       MVT::i8,      4 },
3105      { ISD::EXTRACT_VECTOR_ELT,       MVT::i16,     4 },
3106      { ISD::EXTRACT_VECTOR_ELT,       MVT::i32,     4 },
3107      { ISD::EXTRACT_VECTOR_ELT,       MVT::i64,     7 }
3108    };
3109 
3110   assert(Val->isVectorTy() && "This must be a vector type");
3111   Type *ScalarType = Val->getScalarType();
3112   int RegisterFileMoveCost = 0;
3113 
3114   if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3115                        Opcode == Instruction::InsertElement)) {
3116     // Legalize the type.
3117     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3118 
3119     // This type is legalized to a scalar type.
3120     if (!LT.second.isVector())
3121       return 0;
3122 
3123     // The type may be split. Normalize the index to the new type.
3124     unsigned NumElts = LT.second.getVectorNumElements();
3125     unsigned SubNumElts = NumElts;
3126     Index = Index % NumElts;
3127 
3128     // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3129     // For inserts, we also need to insert the subvector back.
3130     if (LT.second.getSizeInBits() > 128) {
3131       assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3132       unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3133       SubNumElts = NumElts / NumSubVecs;
3134       if (SubNumElts <= Index) {
3135         RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3136         Index %= SubNumElts;
3137       }
3138     }
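    // Illustrative example: extracting element 5 of a v8i32 on AVX means the
    // vector is viewed as two 128-bit subvectors (SubNumElts = 4); index 5
    // lives in the upper half, so we pay one RegisterFileMoveCost for the
    // subvector extract and renormalize the index to 1 within that half.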
3139 
3140     if (Index == 0) {
3141       // Floating point scalars are already located in index #0.
3142       // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3143       // it's true for all.
3144       if (ScalarType->isFloatingPointTy())
3145         return RegisterFileMoveCost;
3146 
3147       // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3148       if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3149         return 1 + RegisterFileMoveCost;
3150     }
3151 
3152     int ISD = TLI->InstructionOpcodeToISD(Opcode);
3153     assert(ISD && "Unexpected vector opcode");
3154     MVT MScalarTy = LT.second.getScalarType();
3155     if (ST->isSLM())
3156       if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3157         return Entry->Cost + RegisterFileMoveCost;
3158 
3159     // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3160     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3161         (MScalarTy.isInteger() && ST->hasSSE41()))
3162       return 1 + RegisterFileMoveCost;
3163 
3164     // Assume insertps is relatively cheap on all targets.
3165     if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3166         Opcode == Instruction::InsertElement)
3167       return 1 + RegisterFileMoveCost;
3168 
3169     // For extractions we just need to shuffle the element to index 0, which
3170     // should be very cheap (assume cost = 1). For insertions we need to shuffle
3171     // the element to its destination. In both cases we must handle the
3172     // subvector move(s).
3173     // If the vector type is already less than 128-bits then don't reduce it.
3174     // TODO: Under what circumstances should we shuffle using the full width?
3175     InstructionCost ShuffleCost = 1;
3176     if (Opcode == Instruction::InsertElement) {
3177       auto *SubTy = cast<VectorType>(Val);
3178       EVT VT = TLI->getValueType(DL, Val);
3179       if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3180         SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3181       ShuffleCost =
3182           getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3183     }
3184     int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3185     return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3186   }
3187 
3188   // Add to the base cost if we know that the extracted element of a vector is
3189   // destined to be moved to and used in the integer register file.
3190   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3191     RegisterFileMoveCost += 1;
3192 
3193   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3194 }
3195 
3196 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3197                                                      const APInt &DemandedElts,
3198                                                      bool Insert,
3199                                                      bool Extract) {
3200   InstructionCost Cost = 0;
3201 
3202   // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
3203   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3204   if (Insert) {
3205     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3206     MVT MScalarTy = LT.second.getScalarType();
3207 
3208     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3209         (MScalarTy.isInteger() && ST->hasSSE41()) ||
3210         (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3211       // For types we can insert directly, insertion into 128-bit sub vectors is
3212       // cheap, followed by a cheap chain of concatenations.
3213       if (LT.second.getSizeInBits() <= 128) {
3214         Cost +=
3215             BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3216       } else {
3217         // In each 128-bit lane, if at least one index is demanded but not
3218         // all indices are demanded, and this 128-bit lane is not the first
3219         // 128-bit lane of the legalized vector, then this lane needs an
3220         // extracti128; if at least one index in a 128-bit lane is demanded,
3221         // that lane needs an inserti128.
3222 
3223         // The following cases will help you build a better understanding:
3224         // Assume we insert several elements into a v8i32 vector in avx2,
3225         // Case#1: inserting into the 1st index needs vpinsrd + inserti128.
3226         // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3227         // inserti128.
3228         // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3229         const int CostValue = *LT.first.getValue();
3230         assert(CostValue >= 0 && "Negative cost!");
3231         unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3232         unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3233         APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3234         unsigned Scale = NumElts / Num128Lanes;
3235         // We iterate each 128-lane, and check if we need a
3236         // extracti128/inserti128 for this 128-lane.
3237         for (unsigned I = 0; I < NumElts; I += Scale) {
3238           APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3239           APInt MaskedDE = Mask & WidenedDemandedElts;
3240           unsigned Population = MaskedDE.countPopulation();
3241           Cost += (Population > 0 && Population != Scale &&
3242                    I % LT.second.getVectorNumElements() != 0);
3243           Cost += Population > 0;
3244         }
3245         Cost += DemandedElts.countPopulation();
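        // Worked example for Case#2 above: inserting only into index 5 of a
        // v8i32 touches the second 128-bit lane with 1 of its 4 indices
        // demanded, so that lane pays extracti128 (1) + inserti128 (1), and
        // the countPopulation() term adds the vpinsrd (1), totalling 3.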
3246 
3247         // For vXf32 cases, insertion into the 0'th index in each v4f32
3248         // 128-bit vector is free.
3249         // NOTE: This assumes legalization widens vXf32 vectors.
3250         if (MScalarTy == MVT::f32)
3251           for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3252                i < e; i += 4)
3253             if (DemandedElts[i])
3254               Cost--;
3255       }
3256     } else if (LT.second.isVector()) {
3257       // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3258       // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3259       // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3260       // considered cheap.
3261       if (Ty->isIntOrIntVectorTy())
3262         Cost += DemandedElts.countPopulation();
3263 
3264       // Get the smaller of the legalized or original pow2-extended number of
3265       // vector elements, which represents the number of unpacks we'll end up
3266       // performing.
3267       unsigned NumElts = LT.second.getVectorNumElements();
3268       unsigned Pow2Elts =
3269           PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3270       Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3271     }
3272   }
3273 
3274   // TODO: Use default extraction for now, but we should investigate extending this
3275   // to handle repeated subvector extraction.
3276   if (Extract)
3277     Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3278 
3279   return Cost;
3280 }
3281 
3282 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3283                                             MaybeAlign Alignment,
3284                                             unsigned AddressSpace,
3285                                             TTI::TargetCostKind CostKind,
3286                                             const Instruction *I) {
3287   // TODO: Handle other cost kinds.
3288   if (CostKind != TTI::TCK_RecipThroughput) {
3289     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3290       // A store instruction with index and scale addressing costs 2 uops.
3291       // Check the preceding GEP to identify non-const indices.
3292       if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3293         if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3294           return TTI::TCC_Basic * 2;
3295       }
3296     }
3297     return TTI::TCC_Basic;
3298   }
3299 
3300   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3301          "Invalid Opcode");
3302   // Type legalization can't handle structs
3303   if (TLI->getValueType(DL, Src, true) == MVT::Other)
3304     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3305                                   CostKind);
3306 
3307   // Legalize the type.
3308   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3309 
3310   auto *VTy = dyn_cast<FixedVectorType>(Src);
3311 
3312   // Handle the simple case of non-vectors.
3313   // NOTE: this assumes that legalization never creates vectors from scalars!
3314   if (!VTy || !LT.second.isVector())
3315     // Each load/store unit costs 1.
3316     return LT.first * 1;
3317 
3318   bool IsLoad = Opcode == Instruction::Load;
3319 
3320   Type *EltTy = VTy->getElementType();
3321 
3322   const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3323 
3324   InstructionCost Cost = 0;
3325 
3326   // Source of truth: how many elements were there in the original IR vector?
3327   const unsigned SrcNumElt = VTy->getNumElements();
3328 
3329   // How far have we gotten?
3330   int NumEltRemaining = SrcNumElt;
3331   // Note that we intentionally capture by reference; NumEltRemaining changes.
3332   auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3333 
3334   const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3335 
3336   // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3337   const unsigned XMMBits = 128;
3338   if (XMMBits % EltTyBits != 0)
3339     // Vector size must be a multiple of the element size. I.e. no padding.
3340     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3341                                   CostKind);
3342   const int NumEltPerXMM = XMMBits / EltTyBits;
3343 
3344   auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3345 
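  // Illustrative example: a store of <3 x i32> legalizes to v4i32 (16 bytes),
  // so the loop below first rejects the full 16-byte op (only 3 elements
  // remain), then covers two elements with one 8-byte op (movq), and finally
  // the last element with a 4-byte op that also pays an extract from the XMM.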
3346   for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3347        NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3348     // How many elements would a single op deal with at once?
3349     if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3350       // Vector size must be a multiple of the element size. I.e. no padding.
3351       return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3352                                     CostKind);
3353     int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3354 
3355     assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3356     assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3357             (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3358            "Unless we haven't halved the op size yet, "
3359            "we have less than two ops' worth of work left.");
3360 
3361     auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3362                           ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3363                           : XMMVecTy;
3364 
3365     assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3366            "After halving sizes, the vector elt count is no longer a multiple "
3367            "of the number of elements per operation?");
3368     auto *CoalescedVecTy =
3369         CurrNumEltPerOp == 1
3370             ? CurrVecTy
3371             : FixedVectorType::get(
3372                   IntegerType::get(Src->getContext(),
3373                                    EltTyBits * CurrNumEltPerOp),
3374                   CurrVecTy->getNumElements() / CurrNumEltPerOp);
3375     assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3376                DL.getTypeSizeInBits(CurrVecTy) &&
3377            "coalescing elements doesn't change vector width.");
3378 
3379     while (NumEltRemaining > 0) {
3380       assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
3381 
3382       // Can we use this vector size, as per the remaining element count?
3383       // Iff the vector is naturally aligned, we can do a wide load regardless.
3384       if (NumEltRemaining < CurrNumEltPerOp &&
3385           (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3386           CurrOpSizeBytes != 1)
3387         break; // Try smaller vector size.
3388 
3389       bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3390 
3391       // If we have fully processed the previous reg, we need to replenish it.
3392       if (SubVecEltsLeft == 0) {
3393         SubVecEltsLeft += CurrVecTy->getNumElements();
3394         // And that's free only for the 0'th subvector of a legalized vector.
3395         if (!Is0thSubVec)
3396           Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3397                                         : TTI::ShuffleKind::SK_ExtractSubvector,
3398                                  VTy, None, NumEltDone(), CurrVecTy);
3399       }
3400 
3401       // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3402       // for smaller widths (32/16/8) we have to insert/extract them separately.
3403       // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3404       // but let's pretend that it is also true for 16/8 bit wide ops...)
3405       if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3406         int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3407       assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "Expected whole ops");
3408         int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3409         APInt DemandedElts =
3410             APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3411                               CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3412         assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3413         Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3414                                          !IsLoad);
3415       }
3416 
3417       // This isn't exactly right. We're using slow unaligned 32-byte accesses
3418       // as a proxy for a double-pumped AVX memory interface such as on
3419       // Sandybridge.
3420       if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3421         Cost += 2;
3422       else
3423         Cost += 1;
3424 
3425       SubVecEltsLeft -= CurrNumEltPerOp;
3426       NumEltRemaining -= CurrNumEltPerOp;
3427       Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3428     }
3429   }
3430 
3431   assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3432 
3433   return Cost;
3434 }
3435 
3436 InstructionCost
3437 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3438                                   unsigned AddressSpace,
3439                                   TTI::TargetCostKind CostKind) {
3440   bool IsLoad = (Instruction::Load == Opcode);
3441   bool IsStore = (Instruction::Store == Opcode);
3442 
3443   auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3444   if (!SrcVTy)
3445     // For a scalar, take the regular cost without the mask.
3446     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3447 
3448   unsigned NumElem = SrcVTy->getNumElements();
3449   auto *MaskTy =
3450       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3451   if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3452       (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) ||
3453       !isPowerOf2_32(NumElem)) {
3454     // Scalarization
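         // Roughly, the per-lane sequence being priced here is: extract the mask
         // bit, compare it against zero, branch, and perform a scalar load/store
         // (plus inserting the loaded value back into the vector for loads).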
3455     APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3456     InstructionCost MaskSplitCost =
3457         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3458     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3459         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3460         CmpInst::BAD_ICMP_PREDICATE, CostKind);
3461     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3462     InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3463     InstructionCost ValueSplitCost =
3464         getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3465     InstructionCost MemopCost =
3466         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3467                                          Alignment, AddressSpace, CostKind);
3468     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3469   }
3470 
3471   // Legalize the type.
3472   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3473   auto VT = TLI->getValueType(DL, SrcVTy);
3474   InstructionCost Cost = 0;
3475   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3476       LT.second.getVectorNumElements() == NumElem)
3477     // Promotion requires expand/truncate for data and a shuffle for mask.
3478     Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3479             getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3480 
3481   else if (LT.second.getVectorNumElements() > NumElem) {
3482     auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3483                                            LT.second.getVectorNumElements());
3484     // Expanding requires filling the mask with zeroes.
3485     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3486   }
3487 
3488   // Pre-AVX512 - each maskmov load costs 2, and each maskmov store costs ~8.
3489   if (!ST->hasAVX512())
3490     return Cost + LT.first * (IsLoad ? 2 : 8);
3491 
3492   // AVX-512 masked load/store is cheaper.
3493   return Cost + LT.first;
3494 }
3495 
3496 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3497                                                       ScalarEvolution *SE,
3498                                                       const SCEV *Ptr) {
3499   // Address computations in vectorized code with non-consecutive addresses will
3500   // likely result in more instructions compared to scalar code where the
3501   // computation can more often be merged into the index mode. The resulting
3502   // extra micro-ops can significantly decrease throughput.
3503   const unsigned NumVectorInstToHideOverhead = 10;
3504 
3505   // The cost of computing a strided access is hidden by X86's indexing
3506   // modes, regardless of the stride value. We don't believe that there
3507   // is a difference between constant strided access in general and a
3508   // constant stride value which is less than or equal to 64.
3509   // Even in the case of (loop invariant) stride whose value is not known at
3510   // compile time, the address computation will not incur more than one extra
3511   // ADD instruction.
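       // Illustrative: a vectorized access A[i * Stride] with a loop-invariant
       // but unknown Stride is charged one extra ADD (cost 1 below), while a
       // non-strided vector of addresses is assumed to cost roughly
       // NumVectorInstToHideOverhead extra micro-ops.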
3512   if (Ty->isVectorTy() && SE) {
3513     if (!BaseT::isStridedAccess(Ptr))
3514       return NumVectorInstToHideOverhead;
3515     if (!BaseT::getConstantStrideStep(SE, Ptr))
3516       return 1;
3517   }
3518 
3519   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3520 }
3521 
3522 InstructionCost
3523 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3524                                        bool IsPairwise,
3525                                        TTI::TargetCostKind CostKind) {
3526   // Just use the default implementation for pair reductions.
3527   if (IsPairwise)
3528     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3529 
3530   // We use the Intel Architecture Code Analyzer (IACA) to measure the
3531   // throughput and use that as the cost.
3532 
3533   static const CostTblEntry SLMCostTblNoPairWise[] = {
3534     { ISD::FADD,  MVT::v2f64,   3 },
3535     { ISD::ADD,   MVT::v2i64,   5 },
3536   };
3537 
3538   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3539     { ISD::FADD,  MVT::v2f64,   2 },
3540     { ISD::FADD,  MVT::v2f32,   2 },
3541     { ISD::FADD,  MVT::v4f32,   4 },
3542     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
3543     { ISD::ADD,   MVT::v2i32,   2 }, // FIXME: chosen to be less than v4i32
3544     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
3545     { ISD::ADD,   MVT::v2i16,   2 },      // The data reported by the IACA tool is "4.3".
3546     { ISD::ADD,   MVT::v4i16,   3 },      // The data reported by the IACA tool is "4.3".
3547     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
3548     { ISD::ADD,   MVT::v2i8,    2 },
3549     { ISD::ADD,   MVT::v4i8,    2 },
3550     { ISD::ADD,   MVT::v8i8,    2 },
3551     { ISD::ADD,   MVT::v16i8,   3 },
3552   };
3553 
3554   static const CostTblEntry AVX1CostTblNoPairWise[] = {
3555     { ISD::FADD,  MVT::v4f64,   3 },
3556     { ISD::FADD,  MVT::v4f32,   3 },
3557     { ISD::FADD,  MVT::v8f32,   4 },
3558     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
3559     { ISD::ADD,   MVT::v4i64,   3 },
3560     { ISD::ADD,   MVT::v8i32,   5 },
3561     { ISD::ADD,   MVT::v16i16,  5 },
3562     { ISD::ADD,   MVT::v32i8,   4 },
3563   };
3564 
3565   int ISD = TLI->InstructionOpcodeToISD(Opcode);
3566   assert(ISD && "Invalid opcode");
3567 
3568   // Before legalizing the type, give a chance to look up illegal narrow types
3569   // in the table.
3570   // FIXME: Is there a better way to do this?
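       // E.g. v2i16 is not a legal type, but an ADD reduction of it is priced
       // directly from the SSE2 table above (cost 2) rather than via the
       // legalized v8i16 entry (cost 4).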
3571   EVT VT = TLI->getValueType(DL, ValTy);
3572   if (VT.isSimple()) {
3573     MVT MTy = VT.getSimpleVT();
3574     if (ST->isSLM())
3575       if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3576         return Entry->Cost;
3577 
3578     if (ST->hasAVX())
3579       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3580         return Entry->Cost;
3581 
3582     if (ST->hasSSE2())
3583       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3584         return Entry->Cost;
3585   }
3586 
3587   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3588 
3589   MVT MTy = LT.second;
3590 
3591   auto *ValVTy = cast<FixedVectorType>(ValTy);
3592 
3593   // Special case: vXi8 mul reductions are performed as vXi16.
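       // E.g. (illustrative) a v16i8 MUL reduction is costed as a ZExt to
       // v16i16 plus a v16i16 MUL reduction, mirroring the widening that
       // codegen performs.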
3594   if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
3595     auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
3596     auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
3597     return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
3598                             TargetTransformInfo::CastContextHint::None,
3599                             CostKind) +
3600            getArithmeticReductionCost(Opcode, WideVecTy, IsPairwise, CostKind);
3601   }
3602 
3603   InstructionCost ArithmeticCost = 0;
3604   if (LT.first != 1 && MTy.isVector() &&
3605       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3606     // Type needs to be split. We need LT.first - 1 arithmetic ops.
3607     auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3608                                             MTy.getVectorNumElements());
3609     ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3610     ArithmeticCost *= LT.first - 1;
3611   }
3612 
3613   if (ST->isSLM())
3614     if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3615       return ArithmeticCost + Entry->Cost;
3616 
3617   if (ST->hasAVX())
3618     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3619       return ArithmeticCost + Entry->Cost;
3620 
3621   if (ST->hasSSE2())
3622     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3623       return ArithmeticCost + Entry->Cost;
3624 
3625   // FIXME: These assume a naive kshift+binop lowering, which is probably
3626   // conservative in most cases.
3627   static const CostTblEntry AVX512BoolReduction[] = {
3628     { ISD::AND,  MVT::v2i1,   3 },
3629     { ISD::AND,  MVT::v4i1,   5 },
3630     { ISD::AND,  MVT::v8i1,   7 },
3631     { ISD::AND,  MVT::v16i1,  9 },
3632     { ISD::AND,  MVT::v32i1, 11 },
3633     { ISD::AND,  MVT::v64i1, 13 },
3634     { ISD::OR,   MVT::v2i1,   3 },
3635     { ISD::OR,   MVT::v4i1,   5 },
3636     { ISD::OR,   MVT::v8i1,   7 },
3637     { ISD::OR,   MVT::v16i1,  9 },
3638     { ISD::OR,   MVT::v32i1, 11 },
3639     { ISD::OR,   MVT::v64i1, 13 },
3640   };
3641 
3642   static const CostTblEntry AVX2BoolReduction[] = {
3643     { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
3644     { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
3645     { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
3646     { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
3647   };
3648 
3649   static const CostTblEntry AVX1BoolReduction[] = {
3650     { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
3651     { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
3652     { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
3653     { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
3654     { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
3655     { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
3656     { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
3657     { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
3658   };
3659 
3660   static const CostTblEntry SSE2BoolReduction[] = {
3661     { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
3662     { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
3663     { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
3664     { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
3665     { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
3666     { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
3667     { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
3668     { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
3669   };
3670 
3671   // Handle bool allof/anyof patterns.
3672   if (ValVTy->getElementType()->isIntegerTy(1)) {
3673     InstructionCost ArithmeticCost = 0;
3674     if (LT.first != 1 && MTy.isVector() &&
3675         MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3676       // Type needs to be split. We need LT.first - 1 arithmetic ops.
3677       auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3678                                               MTy.getVectorNumElements());
3679       ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3680       ArithmeticCost *= LT.first - 1;
3681     }
3682 
3683     if (ST->hasAVX512())
3684       if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3685         return ArithmeticCost + Entry->Cost;
3686     if (ST->hasAVX2())
3687       if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3688         return ArithmeticCost + Entry->Cost;
3689     if (ST->hasAVX())
3690       if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3691         return ArithmeticCost + Entry->Cost;
3692     if (ST->hasSSE2())
3693       if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3694         return ArithmeticCost + Entry->Cost;
3695 
3696     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3697                                              CostKind);
3698   }
3699 
3700   unsigned NumVecElts = ValVTy->getNumElements();
3701   unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3702 
3703   // Only special-case power-of-2 reductions where the scalar type isn't
3704   // changed by type legalization; otherwise fall back to the default.
3705   if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3706     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3707                                              CostKind);
3708 
3709   InstructionCost ReductionCost = 0;
3710 
3711   auto *Ty = ValVTy;
3712   if (LT.first != 1 && MTy.isVector() &&
3713       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3714     // Type needs to be split. We need LT.first - 1 arithmetic ops.
3715     Ty = FixedVectorType::get(ValVTy->getElementType(),
3716                               MTy.getVectorNumElements());
3717     ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3718     ReductionCost *= LT.first - 1;
3719     NumVecElts = MTy.getVectorNumElements();
3720   }
3721 
3722   // Now handle reduction with the legal type, taking into account size changes
3723   // at each level.
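       // Illustrative walk-through for a v8i32 ADD reduction (AVX2): level one
       // extracts a v4i32 subvector and adds (Size == 256), level two is a
       // v2i64 permute plus an add (Size == 128), level three is a v4i32
       // shuffle plus an add (Size == 64), and the final scalar is taken out
       // by the extractelement priced at the end of this function.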
3724   while (NumVecElts > 1) {
3725     // Determine the size of the remaining vector we need to reduce.
3726     unsigned Size = NumVecElts * ScalarSize;
3727     NumVecElts /= 2;
3728     // If we're reducing from 256/512 bits, use an extract_subvector.
3729     if (Size > 128) {
3730       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3731       ReductionCost +=
3732           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
3733       Ty = SubTy;
3734     } else if (Size == 128) {
3735       // Reducing from 128 bits is a permute of v2f64/v2i64.
3736       FixedVectorType *ShufTy;
3737       if (ValVTy->getElementType()->isFloatingPointTy())
3738         ShufTy =
3739             FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3740       else
3741         ShufTy =
3742             FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3743       ReductionCost +=
3744           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
3745     } else if (Size == 64) {
3746       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3747       FixedVectorType *ShufTy;
3748       if (ValVTy->getElementType()->isFloatingPointTy())
3749         ShufTy =
3750             FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3751       else
3752         ShufTy =
3753             FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3754       ReductionCost +=
3755           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
3756     } else {
3757       // Reducing from smaller size is a shift by immediate.
3758       auto *ShiftTy = FixedVectorType::get(
3759           Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3760       ReductionCost += getArithmeticInstrCost(
3761           Instruction::LShr, ShiftTy, CostKind,
3762           TargetTransformInfo::OK_AnyValue,
3763           TargetTransformInfo::OK_UniformConstantValue,
3764           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3765     }
3766 
3767     // Add the arithmetic op for this level.
3768     ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3769   }
3770 
3771   // Add the final extract element to the cost.
3772   return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3773 }
3774 
3775 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
3776                                           bool IsUnsigned) {
3777   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3778 
3779   MVT MTy = LT.second;
3780 
3781   int ISD;
3782   if (Ty->isIntOrIntVectorTy()) {
3783     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3784   } else {
3785     assert(Ty->isFPOrFPVectorTy() &&
3786            "Expected floating point or integer vector type.");
3787     ISD = ISD::FMINNUM;
3788   }
3789 
3790   static const CostTblEntry SSE1CostTbl[] = {
3791     {ISD::FMINNUM, MVT::v4f32, 1},
3792   };
3793 
3794   static const CostTblEntry SSE2CostTbl[] = {
3795     {ISD::FMINNUM, MVT::v2f64, 1},
3796     {ISD::SMIN,    MVT::v8i16, 1},
3797     {ISD::UMIN,    MVT::v16i8, 1},
3798   };
3799 
3800   static const CostTblEntry SSE41CostTbl[] = {
3801     {ISD::SMIN,    MVT::v4i32, 1},
3802     {ISD::UMIN,    MVT::v4i32, 1},
3803     {ISD::UMIN,    MVT::v8i16, 1},
3804     {ISD::SMIN,    MVT::v16i8, 1},
3805   };
3806 
3807   static const CostTblEntry SSE42CostTbl[] = {
3808     {ISD::UMIN,    MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3809   };
3810 
3811   static const CostTblEntry AVX1CostTbl[] = {
3812     {ISD::FMINNUM, MVT::v8f32,  1},
3813     {ISD::FMINNUM, MVT::v4f64,  1},
3814     {ISD::SMIN,    MVT::v8i32,  3},
3815     {ISD::UMIN,    MVT::v8i32,  3},
3816     {ISD::SMIN,    MVT::v16i16, 3},
3817     {ISD::UMIN,    MVT::v16i16, 3},
3818     {ISD::SMIN,    MVT::v32i8,  3},
3819     {ISD::UMIN,    MVT::v32i8,  3},
3820   };
3821 
3822   static const CostTblEntry AVX2CostTbl[] = {
3823     {ISD::SMIN,    MVT::v8i32,  1},
3824     {ISD::UMIN,    MVT::v8i32,  1},
3825     {ISD::SMIN,    MVT::v16i16, 1},
3826     {ISD::UMIN,    MVT::v16i16, 1},
3827     {ISD::SMIN,    MVT::v32i8,  1},
3828     {ISD::UMIN,    MVT::v32i8,  1},
3829   };
3830 
3831   static const CostTblEntry AVX512CostTbl[] = {
3832     {ISD::FMINNUM, MVT::v16f32, 1},
3833     {ISD::FMINNUM, MVT::v8f64,  1},
3834     {ISD::SMIN,    MVT::v2i64,  1},
3835     {ISD::UMIN,    MVT::v2i64,  1},
3836     {ISD::SMIN,    MVT::v4i64,  1},
3837     {ISD::UMIN,    MVT::v4i64,  1},
3838     {ISD::SMIN,    MVT::v8i64,  1},
3839     {ISD::UMIN,    MVT::v8i64,  1},
3840     {ISD::SMIN,    MVT::v16i32, 1},
3841     {ISD::UMIN,    MVT::v16i32, 1},
3842   };
3843 
3844   static const CostTblEntry AVX512BWCostTbl[] = {
3845     {ISD::SMIN,    MVT::v32i16, 1},
3846     {ISD::UMIN,    MVT::v32i16, 1},
3847     {ISD::SMIN,    MVT::v64i8,  1},
3848     {ISD::UMIN,    MVT::v64i8,  1},
3849   };
3850 
3851   // If we have a native MIN/MAX instruction for this type, use it.
3852   if (ST->hasBWI())
3853     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3854       return LT.first * Entry->Cost;
3855 
3856   if (ST->hasAVX512())
3857     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3858       return LT.first * Entry->Cost;
3859 
3860   if (ST->hasAVX2())
3861     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3862       return LT.first * Entry->Cost;
3863 
3864   if (ST->hasAVX())
3865     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3866       return LT.first * Entry->Cost;
3867 
3868   if (ST->hasSSE42())
3869     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3870       return LT.first * Entry->Cost;
3871 
3872   if (ST->hasSSE41())
3873     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3874       return LT.first * Entry->Cost;
3875 
3876   if (ST->hasSSE2())
3877     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3878       return LT.first * Entry->Cost;
3879 
3880   if (ST->hasSSE1())
3881     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3882       return LT.first * Entry->Cost;
3883 
3884   unsigned CmpOpcode;
3885   if (Ty->isFPOrFPVectorTy()) {
3886     CmpOpcode = Instruction::FCmp;
3887   } else {
3888     assert(Ty->isIntOrIntVectorTy() &&
3889            "expecting floating point or integer type for min/max reduction");
3890     CmpOpcode = Instruction::ICmp;
3891   }
3892 
3893   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3894   // Otherwise fall back to cmp+select.
3895   InstructionCost Result =
3896       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
3897                          CostKind) +
3898       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
3899                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
3900   return Result;
3901 }
3902 
3903 InstructionCost
3904 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3905                                    bool IsPairwise, bool IsUnsigned,
3906                                    TTI::TargetCostKind CostKind) {
3907   // Just use the default implementation for pair reductions.
3908   if (IsPairwise)
3909     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3910                                          CostKind);
3911 
3912   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3913 
3914   MVT MTy = LT.second;
3915 
3916   int ISD;
3917   if (ValTy->isIntOrIntVectorTy()) {
3918     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3919   } else {
3920     assert(ValTy->isFPOrFPVectorTy() &&
3921            "Expected floating point or integer vector type.");
3922     ISD = ISD::FMINNUM;
3923   }
3924 
3925   // We use the Intel Architecture Code Analyzer (IACA) to measure the
3926   // throughput and use that as the cost.
3927 
3928   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3929       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3930       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3931       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3932   };
3933 
3934   static const CostTblEntry SSE41CostTblNoPairWise[] = {
3935       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3936       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3937       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3938       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3939       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3940       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3941       {ISD::SMIN, MVT::v2i8,  3}, // pminsb
3942       {ISD::SMIN, MVT::v4i8,  5}, // pminsb
3943       {ISD::SMIN, MVT::v8i8,  7}, // pminsb
3944       {ISD::SMIN, MVT::v16i8, 6},
3945       {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
3946       {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
3947       {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
3948       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3949   };
3950 
3951   static const CostTblEntry AVX1CostTblNoPairWise[] = {
3952       {ISD::SMIN, MVT::v16i16, 6},
3953       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3954       {ISD::SMIN, MVT::v32i8, 8},
3955       {ISD::UMIN, MVT::v32i8, 8},
3956   };
3957 
3958   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
3959       {ISD::SMIN, MVT::v32i16, 8},
3960       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
3961       {ISD::SMIN, MVT::v64i8, 10},
3962       {ISD::UMIN, MVT::v64i8, 10},
3963   };
3964 
3965   // Before legalizing the type, give a chance to look up illegal narrow types
3966   // in the table.
3967   // FIXME: Is there a better way to do this?
3968   EVT VT = TLI->getValueType(DL, ValTy);
3969   if (VT.isSimple()) {
3970     MVT MTy = VT.getSimpleVT();
3971     if (ST->hasBWI())
3972       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
3973         return Entry->Cost;
3974 
3975     if (ST->hasAVX())
3976       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3977         return Entry->Cost;
3978 
3979     if (ST->hasSSE41())
3980       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
3981         return Entry->Cost;
3982 
3983     if (ST->hasSSE2())
3984       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3985         return Entry->Cost;
3986   }
3987 
3988   auto *ValVTy = cast<FixedVectorType>(ValTy);
3989   unsigned NumVecElts = ValVTy->getNumElements();
3990 
3991   auto *Ty = ValVTy;
3992   InstructionCost MinMaxCost = 0;
3993   if (LT.first != 1 && MTy.isVector() &&
3994       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3995     // Type needs to be split. We need LT.first - 1 operations.
3996     Ty = FixedVectorType::get(ValVTy->getElementType(),
3997                               MTy.getVectorNumElements());
3998     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
3999                                            MTy.getVectorNumElements());
4000     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4001     MinMaxCost *= LT.first - 1;
4002     NumVecElts = MTy.getVectorNumElements();
4003   }
4004 
4005   if (ST->hasBWI())
4006     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4007       return MinMaxCost + Entry->Cost;
4008 
4009   if (ST->hasAVX())
4010     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4011       return MinMaxCost + Entry->Cost;
4012 
4013   if (ST->hasSSE41())
4014     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4015       return MinMaxCost + Entry->Cost;
4016 
4017   if (ST->hasSSE2())
4018     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4019       return MinMaxCost + Entry->Cost;
4020 
4021   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4022 
4023   // Only special-case power-of-2 reductions where the scalar type isn't
4024   // changed by type legalization; otherwise fall back to the default.
4025   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4026       ScalarSize != MTy.getScalarSizeInBits())
4027     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
4028                                          CostKind);
4029 
4030   // Now handle reduction with the legal type, taking into account size changes
4031   // at each level.
4032   while (NumVecElts > 1) {
4033     // Determine the size of the remaining vector we need to reduce.
4034     unsigned Size = NumVecElts * ScalarSize;
4035     NumVecElts /= 2;
4036     // If we're reducing from 256/512 bits, use an extract_subvector.
4037     if (Size > 128) {
4038       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4039       MinMaxCost +=
4040           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4041       Ty = SubTy;
4042     } else if (Size == 128) {
4043       // Reducing from 128 bits is a permute of v2f64/v2i64.
4044       VectorType *ShufTy;
4045       if (ValTy->getScalarType()->isFloatingPointTy())
4046         ShufTy =
4047             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4048       else
4049         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4050       MinMaxCost +=
4051           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4052     } else if (Size == 64) {
4053       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4054       FixedVectorType *ShufTy;
4055       if (ValTy->getScalarType()->isFloatingPointTy())
4056         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4057       else
4058         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4059       MinMaxCost +=
4060           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4061     } else {
4062       // Reducing from smaller size is a shift by immediate.
4063       auto *ShiftTy = FixedVectorType::get(
4064           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4065       MinMaxCost += getArithmeticInstrCost(
4066           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4067           TargetTransformInfo::OK_AnyValue,
4068           TargetTransformInfo::OK_UniformConstantValue,
4069           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4070     }
4071 
4072     // Add the arithmetic op for this level.
4073     auto *SubCondTy =
4074         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4075     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4076   }
4077 
4078   // Add the final extract element to the cost.
4079   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4080 }
4081 
4082 /// Calculate the cost of materializing a 64-bit value. This helper
4083 /// method might only calculate a fraction of a larger immediate. Therefore it
4084 /// is valid to return a cost of ZERO.
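     /// For example: Val == 0 is free (a register can simply be zeroed), a
     /// value that fits in a sign-extended 32-bit immediate costs one
     /// TCC_Basic, and a full 64-bit immediate costs two.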
4085 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4086   if (Val == 0)
4087     return TTI::TCC_Free;
4088 
4089   if (isInt<32>(Val))
4090     return TTI::TCC_Basic;
4091 
4092   return 2 * TTI::TCC_Basic;
4093 }
4094 
4095 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4096                                           TTI::TargetCostKind CostKind) {
4097   assert(Ty->isIntegerTy());
4098 
4099   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4100   if (BitSize == 0)
4101     return ~0U;
4102 
4103   // Never hoist constants larger than 128 bits, because this might lead to
4104   // incorrect code generation or assertions in codegen.
4105   // FIXME: Create a cost model for types larger than i128 once the codegen
4106   // issues have been fixed.
4107   if (BitSize > 128)
4108     return TTI::TCC_Free;
4109 
4110   if (Imm == 0)
4111     return TTI::TCC_Free;
4112 
4113   // Sign-extend all constants to a multiple of 64-bit.
4114   APInt ImmVal = Imm;
4115   if (BitSize % 64 != 0)
4116     ImmVal = Imm.sext(alignTo(BitSize, 64));
4117 
4118   // Split the constant into 64-bit chunks and calculate the cost for each
4119   // chunk.
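       // E.g. (illustrative) an i128 constant equal to (1 << 64) | 42 splits
       // into the chunks 42 and 1, each of which fits in 32 bits and costs
       // TCC_Basic, for a total cost of 2.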
4120   InstructionCost Cost = 0;
4121   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4122     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4123     int64_t Val = Tmp.getSExtValue();
4124     Cost += getIntImmCost(Val);
4125   }
4126   // We need at least one instruction to materialize the constant.
4127   return std::max<InstructionCost>(1, Cost);
4128 }
4129 
4130 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4131                                               const APInt &Imm, Type *Ty,
4132                                               TTI::TargetCostKind CostKind,
4133                                               Instruction *Inst) {
4134   assert(Ty->isIntegerTy());
4135 
4136   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4137   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4138   // here, so that constant hoisting will ignore this constant.
4139   if (BitSize == 0)
4140     return TTI::TCC_Free;
4141 
4142   unsigned ImmIdx = ~0U;
4143   switch (Opcode) {
4144   default:
4145     return TTI::TCC_Free;
4146   case Instruction::GetElementPtr:
4147     // Always hoist the base address of a GetElementPtr. This prevents the
4148     // creation of new constants for every base constant that gets constant
4149     // folded with the offset.
4150     if (Idx == 0)
4151       return 2 * TTI::TCC_Basic;
4152     return TTI::TCC_Free;
4153   case Instruction::Store:
4154     ImmIdx = 0;
4155     break;
4156   case Instruction::ICmp:
4157     // This is an imperfect hack to prevent constant hoisting of
4158     // compares that might be trying to check if a 64-bit value fits in
4159     // 32 bits. The backend can optimize these cases using a right shift by 32.
4160     // Ideally we would check the compare predicate here. There are also
4161     // other similar immediates for which the backend can use shifts.
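         // E.g. (icmp ult i64 %x, 0x100000000) can be lowered as a right shift
         // by 32 plus a compare against zero, so hoisting the 0x100000000
         // constant would only pessimize it.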
4162     if (Idx == 1 && Imm.getBitWidth() == 64) {
4163       uint64_t ImmVal = Imm.getZExtValue();
4164       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4165         return TTI::TCC_Free;
4166     }
4167     ImmIdx = 1;
4168     break;
4169     // We support 64-bit ANDs with immediates that have 32 bits of leading
4170     // zeroes, by using a 32-bit operation with implicit zero extension. Detect
4171     // such immediates here, as the normal path expects bit 31 to be sign extended.
4172     // immediates here as the normal path expects bit 31 to be sign extended.
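         // E.g. (and i64 %x, 0x80000001) has bit 31 set, so the immediate is
         // not encodable as a sign-extended imm32, but a 32-bit AND whose
         // result is implicitly zero extended computes the same value.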
4173     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4174       return TTI::TCC_Free;
4175     ImmIdx = 1;
4176     break;
4177   case Instruction::Add:
4178   case Instruction::Sub:
4179     // For add/sub, we can use the opposite instruction for INT32_MIN.
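         // E.g. (add i64 %x, 0x80000000) is not encodable as a sign-extended
         // imm32, but the equivalent (sub i64 %x, -0x80000000) is, since
         // -2147483648 does fit in a signed 32-bit immediate.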
4180     if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4181       return TTI::TCC_Free;
4182     ImmIdx = 1;
4183     break;
4184   case Instruction::UDiv:
4185   case Instruction::SDiv:
4186   case Instruction::URem:
4187   case Instruction::SRem:
4188     // Division by constant is typically expanded later into a different
4189     // instruction sequence. This completely changes the constants.
4190     // Report them as "free" to stop ConstantHoist from marking them as opaque.
4191     return TTI::TCC_Free;
4192   case Instruction::Mul:
4193   case Instruction::Or:
4194   case Instruction::Xor:
4195     ImmIdx = 1;
4196     break;
4197   // Always return TCC_Free for the shift value of a shift instruction.
4198   case Instruction::Shl:
4199   case Instruction::LShr:
4200   case Instruction::AShr:
4201     if (Idx == 1)
4202       return TTI::TCC_Free;
4203     break;
4204   case Instruction::Trunc:
4205   case Instruction::ZExt:
4206   case Instruction::SExt:
4207   case Instruction::IntToPtr:
4208   case Instruction::PtrToInt:
4209   case Instruction::BitCast:
4210   case Instruction::PHI:
4211   case Instruction::Call:
4212   case Instruction::Select:
4213   case Instruction::Ret:
4214   case Instruction::Load:
4215     break;
4216   }
4217 
4218   if (Idx == ImmIdx) {
4219     int NumConstants = divideCeil(BitSize, 64);
4220     InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4221     return (Cost <= NumConstants * TTI::TCC_Basic)
4222                ? static_cast<int>(TTI::TCC_Free)
4223                : Cost;
4224   }
4225 
4226   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4227 }
4228 
4229 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4230                                                 const APInt &Imm, Type *Ty,
4231                                                 TTI::TargetCostKind CostKind) {
4232   assert(Ty->isIntegerTy());
4233 
4234   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4235   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4236   // here, so that constant hoisting will ignore this constant.
4237   if (BitSize == 0)
4238     return TTI::TCC_Free;
4239 
4240   switch (IID) {
4241   default:
4242     return TTI::TCC_Free;
4243   case Intrinsic::sadd_with_overflow:
4244   case Intrinsic::uadd_with_overflow:
4245   case Intrinsic::ssub_with_overflow:
4246   case Intrinsic::usub_with_overflow:
4247   case Intrinsic::smul_with_overflow:
4248   case Intrinsic::umul_with_overflow:
4249     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4250       return TTI::TCC_Free;
4251     break;
4252   case Intrinsic::experimental_stackmap:
4253     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4254       return TTI::TCC_Free;
4255     break;
4256   case Intrinsic::experimental_patchpoint_void:
4257   case Intrinsic::experimental_patchpoint_i64:
4258     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4259       return TTI::TCC_Free;
4260     break;
4261   }
4262   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4263 }
4264 
4265 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4266                                            TTI::TargetCostKind CostKind,
4267                                            const Instruction *I) {
4268   if (CostKind != TTI::TCK_RecipThroughput)
4269     return Opcode == Instruction::PHI ? 0 : 1;
4270   // Branches are assumed to be predicted.
4271   return 0;
4272 }
4273 
4274 int X86TTIImpl::getGatherOverhead() const {
4275   // Some CPUs have more overhead for gather. The specified overhead is relative
4276   // to the Load operation. "2" is the number provided by Intel architects. This
4277   // parameter is used for cost estimation of Gather Op and comparison with
4278   // other alternatives.
4279   // TODO: Remove the explicit hasAVX512()? That would mean we would only
4280   // enable gather with a -march.
4281   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4282     return 2;
4283 
4284   return 1024;
4285 }
4286 
4287 int X86TTIImpl::getScatterOverhead() const {
4288   if (ST->hasAVX512())
4289     return 2;
4290 
4291   return 1024;
4292 }
4293 
4294 // Return an average cost of Gather / Scatter instruction, maybe improved later.
4295 // FIXME: Add TargetCostKind support.
4296 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4297                                             const Value *Ptr, Align Alignment,
4298                                             unsigned AddressSpace) {
4299 
4300   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4301   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4302 
4303   // Try to reduce the index size from 64 bits (the default for GEP) to 32.
4304   // This is essential for VF 16. If the index can't be reduced to 32 bits,
4305   // the operation will use 16 x 64 indices, which do not fit in a zmm and
4306   // need to be split. Also check that the base pointer is the same for all
4307   // lanes, and that there's at most one variable index.
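       // Illustrative: with VF == 16 and i64 indices, a gather would need two
       // zmm registers just for the indices (8 x i64 each); if the lambda
       // below proves the indices fit in 32 bits, a single register of
       // 16 x i32 indices suffices.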
4308   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4309     unsigned IndexSize = DL.getPointerSizeInBits();
4310     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4311     if (IndexSize < 64 || !GEP)
4312       return IndexSize;
4313 
4314     unsigned NumOfVarIndices = 0;
4315     const Value *Ptrs = GEP->getPointerOperand();
4316     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4317       return IndexSize;
4318     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4319       if (isa<Constant>(GEP->getOperand(i)))
4320         continue;
4321       Type *IndxTy = GEP->getOperand(i)->getType();
4322       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4323         IndxTy = IndexVTy->getElementType();
4324       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4325           !isa<SExtInst>(GEP->getOperand(i))) ||
4326          ++NumOfVarIndices > 1)
4327         return IndexSize; // 64
4328     }
4329     return (unsigned)32;
4330   };
4331 
4332   // Trying to reduce IndexSize to 32 bits for vector 16.
4333   // By default the IndexSize is equal to pointer size.
4334   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4335                            ? getIndexSizeInBits(Ptr, DL)
4336                            : DL.getPointerSizeInBits();
4337 
4338   auto *IndexVTy = FixedVectorType::get(
4339       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4340   std::pair<InstructionCost, MVT> IdxsLT =
4341       TLI->getTypeLegalizationCost(DL, IndexVTy);
4342   std::pair<InstructionCost, MVT> SrcLT =
4343       TLI->getTypeLegalizationCost(DL, SrcVTy);
4344   InstructionCost::CostType SplitFactor =
4345       *std::max(IdxsLT.first, SrcLT.first).getValue();
4346   if (SplitFactor > 1) {
4347     // Handle splitting of vector of pointers
4348     auto *SplitSrcTy =
4349         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4350     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4351                                          AddressSpace);
4352   }
4353 
4354   // The gather / scatter cost is given by Intel architects. It is a rough
4355   // number since we are looking at one instruction at a time.
4356   const int GSOverhead = (Opcode == Instruction::Load)
4357                              ? getGatherOverhead()
4358                              : getScatterOverhead();
4359   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4360                                            MaybeAlign(Alignment), AddressSpace,
4361                                            TTI::TCK_RecipThroughput);
4362 }
4363 
4364 /// Return the cost of full scalarization of gather / scatter operation.
4365 ///
4366 /// Opcode - Load or Store instruction.
4367 /// SrcVTy - The type of the data vector that should be gathered or scattered.
4368 /// VariableMask - The mask is non-constant at compile time.
4369 /// Alignment - Alignment for one element.
4370 /// AddressSpace - pointer[s] address space.
4371 ///
4372 /// FIXME: Add TargetCostKind support.
4373 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4374                                             bool VariableMask, Align Alignment,
4375                                             unsigned AddressSpace) {
4376   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4377   APInt DemandedElts = APInt::getAllOnesValue(VF);
4378   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4379 
4380   InstructionCost MaskUnpackCost = 0;
4381   if (VariableMask) {
4382     auto *MaskTy =
4383         FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4384     MaskUnpackCost =
4385         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4386     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4387         Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4388         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4389     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4390     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4391   }
4392 
4393   // The cost of the scalar loads/stores.
4394   InstructionCost MemoryOpCost =
4395       VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4396                            MaybeAlign(Alignment), AddressSpace, CostKind);
4397 
4398   InstructionCost InsertExtractCost = 0;
4399   if (Opcode == Instruction::Load)
4400     for (unsigned i = 0; i < VF; ++i)
4401       // Add the cost of inserting each scalar load into the vector
4402       InsertExtractCost +=
4403         getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4404   else
4405     for (unsigned i = 0; i < VF; ++i)
4406       // Add the cost of extracting each element out of the data vector
4407       InsertExtractCost +=
4408         getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4409 
4410   return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4411 }
4412 
4413 /// Calculate the cost of Gather / Scatter operation
4414 InstructionCost X86TTIImpl::getGatherScatterOpCost(
4415     unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4416     Align Alignment, TTI::TargetCostKind CostKind,
4417     const Instruction *I = nullptr) {
4418   if (CostKind != TTI::TCK_RecipThroughput) {
4419     if ((Opcode == Instruction::Load &&
4420          isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4421         (Opcode == Instruction::Store &&
4422          isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4423       return 1;
4424     return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4425                                          Alignment, CostKind, I);
4426   }
4427 
4428   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4429   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4430   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4431   if (!PtrTy && Ptr->getType()->isVectorTy())
4432     PtrTy = dyn_cast<PointerType>(
4433         cast<VectorType>(Ptr->getType())->getElementType());
4434   assert(PtrTy && "Unexpected type for Ptr argument");
4435   unsigned AddressSpace = PtrTy->getAddressSpace();
4436 
4437   bool Scalarize = false;
4438   if ((Opcode == Instruction::Load &&
4439        !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4440       (Opcode == Instruction::Store &&
4441        !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4442     Scalarize = true;
4443   // Gather / Scatter for vectors of 2 elements is not profitable on KNL / SKX.
4444   // A vector-4 gather/scatter instruction does not exist on KNL.
4445   // We could extend it to 8 elements, but zeroing the upper bits of
4446   // the mask vector would add more instructions. Right now we give the scalar
4447   // cost of vector-4 for KNL. TODO: Check whether the gather/scatter instruction
4448   // is better in the VariableMask case.
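       // E.g. a masked gather of <2 x double>, or of <4 x float> without
       // AVX512VL, is therefore priced via getGSScalarCost() below rather
       // than as a native gather.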
4449   if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
4450     Scalarize = true;
4451 
4452   if (Scalarize)
4453     return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4454                            AddressSpace);
4455 
4456   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4457 }
4458 
4459 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4460                                TargetTransformInfo::LSRCost &C2) {
4461     // The X86-specific choice here is that instruction count gets 1st priority.
4462     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4463                     C1.NumIVMuls, C1.NumBaseAdds,
4464                     C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4465            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4466                     C2.NumIVMuls, C2.NumBaseAdds,
4467                     C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4468 }
4469 
4470 bool X86TTIImpl::canMacroFuseCmp() {
4471   return ST->hasMacroFusion() || ST->hasBranchFusion();
4472 }
4473 
4474 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4475   if (!ST->hasAVX())
4476     return false;
4477 
4478   // The backend can't handle a single element vector.
4479   if (isa<VectorType>(DataTy) &&
4480       cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4481     return false;
4482   Type *ScalarTy = DataTy->getScalarType();
4483 
4484   if (ScalarTy->isPointerTy())
4485     return true;
4486 
4487   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4488     return true;
4489 
4490   if (!ScalarTy->isIntegerTy())
4491     return false;
4492 
4493   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4494   return IntWidth == 32 || IntWidth == 64 ||
4495          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4496 }
4497 
4498 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4499   return isLegalMaskedLoad(DataType, Alignment);
4500 }
4501 
4502 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4503   unsigned DataSize = DL.getTypeStoreSize(DataType);
4504   // The only supported nontemporal loads are for aligned vectors of 16 or 32
4505   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
4506   // (the equivalent stores only require AVX).
4507   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4508     return DataSize == 16 ?  ST->hasSSE1() : ST->hasAVX2();
4509 
4510   return false;
4511 }
4512 
4513 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4514   unsigned DataSize = DL.getTypeStoreSize(DataType);
4515 
4516   // SSE4A supports nontemporal stores of float and double at arbitrary
4517   // alignment.
4518   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4519     return true;
4520 
4521   // Besides the SSE4A subtarget exception above, only aligned stores are
4522   // available nontemporally on any other subtarget.  And only stores with a
4523   // size of 4..32 bytes (powers of 2 only) are permitted.
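       // E.g. (illustrative) a 32-byte <8 x float> store needs AVX (vmovntps),
       // a 16-byte <4 x float> store needs SSE1 (movntps), and aligned 4- or
       // 8-byte scalar stores are accepted unconditionally.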
4524   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4525       !isPowerOf2_32(DataSize))
4526     return false;
4527 
4528   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4529   // loads require AVX2).
4530   if (DataSize == 32)
4531     return ST->hasAVX();
4532   else if (DataSize == 16)
4533     return ST->hasSSE1();
4534   return true;
4535 }
4536 
4537 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4538   if (!isa<VectorType>(DataTy))
4539     return false;
4540 
4541   if (!ST->hasAVX512())
4542     return false;
4543 
4544   // The backend can't handle a single element vector.
4545   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4546     return false;
4547 
4548   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4549 
4550   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4551     return true;
4552 
4553   if (!ScalarTy->isIntegerTy())
4554     return false;
4555 
4556   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4557   return IntWidth == 32 || IntWidth == 64 ||
4558          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4559 }
4560 
4561 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4562   return isLegalMaskedExpandLoad(DataTy);
4563 }
4564 
4565 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4566   // Some CPUs have better gather performance than others.
4567   // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4568   // enable gather with a -march.
4569   if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4570     return false;
4571 
4572   // This function is currently called in two cases: from the Loop Vectorizer
4573   // and from the Scalarizer.
4574   // When the Loop Vectorizer asks about legality of the feature,
4575   // the vectorization factor is not calculated yet. The Loop Vectorizer
4576   // sends a scalar type and the decision is based on the width of the
4577   // scalar element.
4578   // Later on, the cost model will estimate usage of this intrinsic based on
4579   // the vector type.
4580   // The Scalarizer asks again about legality. It sends a vector type.
4581   // In this case we can reject non-power-of-2 vectors.
4582   // We also reject single element vectors as the type legalizer can't
4583   // scalarize it.
4584   if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4585     unsigned NumElts = DataVTy->getNumElements();
4586     if (NumElts == 1)
4587       return false;
4588   }
4589   Type *ScalarTy = DataTy->getScalarType();
4590   if (ScalarTy->isPointerTy())
4591     return true;
4592 
4593   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4594     return true;
4595 
4596   if (!ScalarTy->isIntegerTy())
4597     return false;
4598 
4599   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4600   return IntWidth == 32 || IntWidth == 64;
4601 }
4602 
4603 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4604   // AVX2 doesn't support scatter
4605   if (!ST->hasAVX512())
4606     return false;
4607   return isLegalMaskedGather(DataType, Alignment);
4608 }
4609 
4610 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4611   EVT VT = TLI->getValueType(DL, DataType);
4612   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4613 }
4614 
4615 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4616   return false;
4617 }
4618 
4619 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4620                                      const Function *Callee) const {
4621   const TargetMachine &TM = getTLI()->getTargetMachine();
4622 
4623   // Work this as a subsetting of subtarget features.
4624   const FeatureBitset &CallerBits =
4625       TM.getSubtargetImpl(*Caller)->getFeatureBits();
4626   const FeatureBitset &CalleeBits =
4627       TM.getSubtargetImpl(*Callee)->getFeatureBits();
4628 
4629   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4630   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4631   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4632 }
4633 
4634 bool X86TTIImpl::areFunctionArgsABICompatible(
4635     const Function *Caller, const Function *Callee,
4636     SmallPtrSetImpl<Argument *> &Args) const {
4637   if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4638     return false;
4639 
4640   // If we get here, we know the target features match. If one function
4641   // considers 512-bit vectors legal and the other does not, consider them
4642   // incompatible.
4643   const TargetMachine &TM = getTLI()->getTargetMachine();
4644 
4645   if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4646       TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4647     return true;
4648 
4649   // Consider the arguments compatible if they aren't vectors or aggregates.
4650   // FIXME: Look at the size of vectors.
4651   // FIXME: Look at the element types of aggregates to see if there are vectors.
4652   // FIXME: The API of this function seems intended to allow arguments
4653   // to be removed from the set, but the caller doesn't check if the set
4654   // becomes empty so that may not work in practice.
4655   return llvm::none_of(Args, [](Argument *A) {
4656     auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4657     return EltTy->isVectorTy() || EltTy->isAggregateType();
4658   });
4659 }
4660 
4661 X86TTIImpl::TTI::MemCmpExpansionOptions
4662 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4663   TTI::MemCmpExpansionOptions Options;
4664   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4665   Options.NumLoadsPerBlock = 2;
4666   // All GPR and vector loads can be unaligned.
4667   Options.AllowOverlappingLoads = true;
4668   if (IsZeroCmp) {
4669     // Only enable vector loads for equality comparison. Right now the vector
4670     // version is not as fast for three-way compare (see #33329).
4671     const unsigned PreferredWidth = ST->getPreferVectorWidth();
4672     if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4673     if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4674     if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4675   }
4676   if (ST->is64Bit()) {
4677     Options.LoadSizes.push_back(8);
4678   }
4679   Options.LoadSizes.push_back(4);
4680   Options.LoadSizes.push_back(2);
4681   Options.LoadSizes.push_back(1);
4682   return Options;
4683 }
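// Editor's worked example: with AVX2 (preferred vector width >= 256) an
// equality-only memcmp of 31 bytes can be expanded as one 16-byte compare of
// bytes 0..15 plus an overlapping 16-byte compare of bytes 15..30.
// AllowOverlappingLoads is what permits covering the tail with a second
// 16-byte probe instead of descending through 8/4/2/1-byte loads.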
4684 
4685 bool X86TTIImpl::enableInterleavedAccessVectorization() {
4686   // TODO: We expect this to be beneficial regardless of arch,
4687   // but there are currently some unexplained performance artifacts on Atom.
4688   // As a temporary solution, disable on Atom.
4689   return !(ST->isAtom());
4690 }
4691 
4692 // Get a cost estimate for interleaved load/store operations on AVX2.
4693 // \p Factor is the interleaved-access factor (stride) - number of
4694 // (interleaved) elements in the group.
4695 // \p Indices contains the indices for a strided load: when the
4696 // interleaved load has gaps they indicate which elements are used.
4697 // If Indices is empty (or if the number of indices is equal to the size
4698 // of the interleaved-access as given in \p Factor) the access has no gaps.
4699 //
4700 // Unlike AVX-512, AVX2 lacks the generic cross-lane shuffles that would let
4701 // us compute the cost with a closed-form formula over shuffle operations.
4702 // We therefore use a lookup table instead, filled according to
4703 // the instruction sequences that codegen currently generates.
4704 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4705     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4706     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4707     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4708 
4709   if (UseMaskForCond || UseMaskForGaps)
4710     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4711                                              Alignment, AddressSpace, CostKind,
4712                                              UseMaskForCond, UseMaskForGaps);
4713 
4714   // We currently support only fully-interleaved groups, with no gaps.
4715   // TODO: Also support strided loads (interleaved groups with gaps).
4716   if (Indices.size() && Indices.size() != Factor)
4717     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4718                                              Alignment, AddressSpace,
4719                                              CostKind);
4720 
4721   // VecTy for interleave memop is <VF*Factor x Elt>.
4722   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4723   // VecTy = <12 x i32>.
4724   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4725 
4726   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
4727   // VF=2, while v2i128 is an unsupported MVT vector type
4728   // (see MachineValueType.h::getVectorVT()).
4729   if (!LegalVT.isVector())
4730     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4731                                              Alignment, AddressSpace,
4732                                              CostKind);
4733 
4734   unsigned VF = VecTy->getNumElements() / Factor;
4735   Type *ScalarTy = VecTy->getElementType();
4736   // Deduplicate entries, model floats/pointers as appropriately-sized integers.
4737   if (!ScalarTy->isIntegerTy())
4738     ScalarTy =
4739         Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
4740 
4741   // Get the cost of all the memory operations.
4742   InstructionCost MemOpCosts = getMemoryOpCost(
4743       Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
4744 
4745   auto *VT = FixedVectorType::get(ScalarTy, VF);
4746   EVT ETy = TLI->getValueType(DL, VT);
4747   if (!ETy.isSimple())
4748     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4749                                              Alignment, AddressSpace,
4750                                              CostKind);
4751 
4752   // TODO: Complete for other data types and strides.
4753   // Each combination of stride, element bit width and VF results in a different
4754   // sequence; the cost tables are therefore accessed with
4755   // Factor (stride) and VectorType = VFxiN.
4756   // The table cost accounts only for the shuffle sequence;
4757   // the cost of the loads/stores is accounted for separately.
4758   //
4759   static const CostTblEntry AVX2InterleavedLoadTbl[] = {
4760     { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
4761 
4762     { 3, MVT::v2i8,  10 }, //(load 6i8 and)  deinterleave into 3 x 2i8
4763     { 3, MVT::v4i8,  4 },  //(load 12i8 and) deinterleave into 3 x 4i8
4764     { 3, MVT::v8i8,  9 },  //(load 24i8 and) deinterleave into 3 x 8i8
4765     { 3, MVT::v16i8, 11},  //(load 48i8 and) deinterleave into 3 x 16i8
4766     { 3, MVT::v32i8, 13},  //(load 96i8 and) deinterleave into 3 x 32i8
4767 
4768     { 3, MVT::v8i32, 17 }, //(load 24i32 and) deinterleave into 3 x 8i32
4769 
4770     { 4, MVT::v2i8,  12 }, //(load 8i8 and)   deinterleave into 4 x 2i8
4771     { 4, MVT::v4i8,  4 },  //(load 16i8 and)  deinterleave into 4 x 4i8
4772     { 4, MVT::v8i8,  20 }, //(load 32i8 and)  deinterleave into 4 x 8i8
4773     { 4, MVT::v16i8, 39 }, //(load 64i8 and)  deinterleave into 4 x 16i8
4774     { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
4775 
4776     { 8, MVT::v8i32, 40 }  //(load 64i32 and) deinterleave into 8 x 8i32
4777   };
4778 
4779   static const CostTblEntry AVX2InterleavedStoreTbl[] = {
4780     { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
4781 
4782     { 3, MVT::v2i8,  7 },  //interleave 3 x 2i8  into 6i8 (and store)
4783     { 3, MVT::v4i8,  8 },  //interleave 3 x 4i8  into 12i8 (and store)
4784     { 3, MVT::v8i8,  11 }, //interleave 3 x 8i8  into 24i8 (and store)
4785     { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
4786     { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
4787 
4788     { 4, MVT::v2i8,  12 }, //interleave 4 x 2i8  into 8i8 (and store)
4789     { 4, MVT::v4i8,  9 },  //interleave 4 x 4i8  into 16i8 (and store)
4790     { 4, MVT::v8i8,  10 }, //interleave 4 x 8i8  into 32i8 (and store)
4791     { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
4792     { 4, MVT::v32i8, 12 }  //interleave 4 x 32i8 into 128i8 (and store)
4793   };
4794 
4795   if (Opcode == Instruction::Load) {
4796     if (const auto *Entry =
4797             CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
4798       return MemOpCosts + Entry->Cost;
4799   } else {
4800     assert(Opcode == Instruction::Store &&
4801            "Expected Store Instruction at this point");
4802     if (const auto *Entry =
4803             CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
4804       return MemOpCosts + Entry->Cost;
4805   }
4806 
4807   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4808                                            Alignment, AddressSpace, CostKind);
4809 }
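// Editor's worked example: a stride-3 interleaved load of <24 x i8>
// (VF = 8, Factor = 3) has the per-result type v8i8, matches the
// {3, MVT::v8i8, 9} entry in AVX2InterleavedLoadTbl above, and therefore
// costs MemOpCosts for the <24 x i8> load plus 9 for the deinterleaving
// shuffle sequence.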
4810 
4811 // Get a cost estimate for interleaved load/store operations and strided loads.
4812 // \p Indices contains the indices for a strided load.
4813 // \p Factor - the interleaving factor.
4814 // AVX-512 provides 3-src shuffles that significantly reduce the cost.
4815 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
4816     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4817     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4818     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4819 
4820   if (UseMaskForCond || UseMaskForGaps)
4821     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4822                                              Alignment, AddressSpace, CostKind,
4823                                              UseMaskForCond, UseMaskForGaps);
4824 
4825   // VecTy for interleave memop is <VF*Factor x Elt>.
4826   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4827   // VecTy = <12 x i32>.
4828 
4829   // Calculate the number of memory operations (NumOfMemOps), required
4830   // for load/store the VecTy.
4831   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4832   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4833   unsigned LegalVTSize = LegalVT.getStoreSize();
4834   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
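  // Editor's worked example: for VecTy = <24 x i32> on AVX-512, LegalVT is
  // v16i32, so VecTySize = 96 bytes, LegalVTSize = 64 bytes, and
  // NumOfMemOps = ceil(96 / 64) = 2.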
4835 
4836   // Get the cost of one memory operation.
4837   auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4838                                              LegalVT.getVectorNumElements());
4839   InstructionCost MemOpCost = getMemoryOpCost(
4840       Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);
4841 
4842   unsigned VF = VecTy->getNumElements() / Factor;
4843   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
4844 
4845   if (Opcode == Instruction::Load) {
4846     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
4847     // contain the cost of the optimized shuffle sequence that the
4848     // X86InterleavedAccess pass will generate.
4849     // The cost of the loads and stores is computed separately from the table.
4850 
4851     // X86InterleavedAccess supports only the following interleaved-access groups.
4852     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
4853         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
4854         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
4855         {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
4856     };
4857 
4858     if (const auto *Entry =
4859             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
4860       return NumOfMemOps * MemOpCost + Entry->Cost;
4861     // If an entry does not exist, fall back to the default implementation.
4862 
4863     // The kind of shuffle depends on the number of loaded values.
4864     // If we load the entire data in one register, we can use a 1-src shuffle.
4865     // Otherwise, we'll merge 2 sources in each operation.
4866     TTI::ShuffleKind ShuffleKind =
4867         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
4868 
4869     InstructionCost ShuffleCost =
4870         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
4871 
4872     unsigned NumOfLoadsInInterleaveGrp =
4873         Indices.size() ? Indices.size() : Factor;
4874     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
4875                                           VecTy->getNumElements() / Factor);
4876     InstructionCost NumOfResults =
4877         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
4878         NumOfLoadsInInterleaveGrp;
4879 
4880     // About half of the loads may be folded into shuffles when we have only
4881     // one result. If we have more than one result, we do not fold loads at all.
4882     unsigned NumOfUnfoldedLoads =
4883         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
4884 
4885     // Get the number of shuffle operations per result.
4886     unsigned NumOfShufflesPerResult =
4887         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
4888 
4889     // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
4890     // When we have more than one destination, we need additional instructions
4891     // to preserve the sources.
4892     InstructionCost NumOfMoves = 0;
4893     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
4894       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
4895 
4896     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
4897                            NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
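    // Editor's worked example: a stride-4 load of <64 x i8> with AVX512BW
    // legalizes to a single v64i8 load (NumOfMemOps = 1), so ShuffleKind is
    // SK_PermuteSingleSrc, NumOfResults = 4, NumOfShufflesPerResult = 1 and
    // NumOfMoves = 0, giving Cost = 4 * ShuffleCost + MemOpCost.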
4898 
4899     return Cost;
4900   }
4901 
4902   // Store.
4903   assert(Opcode == Instruction::Store &&
4904          "Expected Store Instruction at this point");
4905   // X86InterleavedAccess supports only the following interleaved-access groups.
4906   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
4907       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
4908       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
4909       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
4910 
4911       {4, MVT::v8i8, 10},  // interleave 4 x 8i8  into 32i8  (and store)
4912       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8  (and store)
4913       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
4914       {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
4915   };
4916 
4917   if (const auto *Entry =
4918           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
4919     return NumOfMemOps * MemOpCost + Entry->Cost;
4920   // If an entry does not exist, fall back to the default implementation.
4921 
4922   // There are no strided stores at the moment, and a store can't be folded
4923   // into a shuffle.
4924   unsigned NumOfSources = Factor; // The number of values to be merged.
4925   InstructionCost ShuffleCost =
4926       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
4927   unsigned NumOfShufflesPerStore = NumOfSources - 1;
4928 
4929   // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
4930   // We need additional instructions to preserve the sources.
4931   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
4932   InstructionCost Cost =
4933       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
4934       NumOfMoves;
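  // Editor's worked example: a stride-3 store of <24 x i16> with AVX512BW
  // widens to a single v32i16 store (NumOfMemOps = 1); NumOfSources = 3
  // gives NumOfShufflesPerStore = 2 and NumOfMoves = 1, so
  // Cost = MemOpCost + 2 * ShuffleCost + 1.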
4935   return Cost;
4936 }
4937 
4938 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
4939     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
4940     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
4941     bool UseMaskForCond, bool UseMaskForGaps) {
4942   auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
4943     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
4944     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
4945         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
4946       return true;
4947     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
4948       return HasBW;
4949     return false;
4950   };
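  // Editor's note: 8- and 16-bit elements additionally require AVX512BW
  // because the word-granularity cross-lane shuffles the interleaving
  // sequences rely on (e.g. vpermw, vpermt2w) are only available with BWI;
  // 32/64-bit element permutes already exist in base AVX-512F.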
4951   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
4952     return getInterleavedMemoryOpCostAVX512(
4953         Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4954         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4955   if (ST->hasAVX2())
4956     return getInterleavedMemoryOpCostAVX2(
4957         Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4958         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
4959 
4960   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4961                                            Alignment, AddressSpace, CostKind,
4962                                            UseMaskForCond, UseMaskForGaps);
4963 }
4964